Example #1
	def depth_first_search(self):
		"""
		Depth-first search.

		@rtype:  tuple
		@return: A tuple containing a dictionary and two lists:
			1. Generated spanning tree
			2. Graph's preordering
			3. Graph's postordering
		"""
		return searching.depth_first_search(self)
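To make the return contract concrete, here is a self-contained sketch that produces the same triple: the spanning tree as a child-to-parent dictionary, plus the two node orderings. The function name and the adjacency-dict input are illustrative, not the library's internals:

def dfs_sketch(adjacency, root):
    """Return (spanning_tree, preorder, postorder) as described above."""
    spanning_tree = {root: None}  # child -> parent; the root has no parent
    preorder, postorder = [], []

    def visit(node):
        preorder.append(node)  # a node is recorded when first reached...
        for neighbor in adjacency.get(node, ()):
            if neighbor not in spanning_tree:
                spanning_tree[neighbor] = node
                visit(neighbor)
        postorder.append(node)  # ...and again once its descendants are done

    visit(root)
    return spanning_tree, preorder, postorder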
Example #2
	def depth_first_search(self):
		"""
		Depth-first search.

		@rtype:  tuple
		@return: A tuple containing three lists:
			1. Generated spanning tree
			2. Graph's preordering
			3. Graph's postordering
		"""
		return searching.depth_first_search(self)
Example #3
def main():
    st = time.perf_counter()  # Start a time counter.

    if len(sys.argv) == 4:  # sys.argv holds the script name plus three command-line arguments.
        # The second argument is the method/algorithm used to find a solution.
        method = sys.argv[1]
        # The third argument is a .txt file containing the initial and final state of the problem.
        input_file = sys.argv[2]
        # The fourth argument is a .txt file that will contain the solution of the problem.
        output_file = sys.argv[3]

        # Read the input file and return two state objects.
        initial_state, goal_state = read_input_file(filename=input_file)

        # Check which method is selected and solve the problem accordingly.
        if method == 'breadth':
            solution = breadth_first_search(current_state=initial_state,
                                            goal_state=goal_state,
                                            timeout=300)
        elif method == 'depth':
            solution = depth_first_search(current_state=initial_state,
                                          goal_state=goal_state,
                                          timeout=300)
        elif method == 'best':
            solution = heuristic_search(current_state=initial_state,
                                        goal_state=goal_state,
                                        method='best',
                                        timeout=300)
        elif method == 'astar':
            solution = heuristic_search(current_state=initial_state,
                                        goal_state=goal_state,
                                        method='astar',
                                        timeout=300)
        else:  # If the method argument is none of the above, print a usage message.
            solution = None
            print('Usage: python bw.py <method> <input filename> <output filename>')

        if solution == goal_state:  # If the solution matches the goal state...
            # Write the solution file and return the number of moves.
            number_of_moves = write_output_file(solution=solution,
                                                filename=output_file)

            print('Solution found!')
            print('Number of blocks:', len(initial_state.layout.keys()))
            print('Method:', method)
            print('Number of moves:', number_of_moves)
            print('Execution time:', str(round(time.perf_counter() - st, 4)))
    else:  # If the number of command-line arguments is wrong, print a usage message.
        print('Usage: python bw.py <method> <input filename> <output filename>')
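The chain of elif branches maps a method name onto a solver. A table-driven alternative is sketched below; it assumes the same solver names and keyword arguments used in the example:

# Sketch of table-driven dispatch; functools.partial pins the extra 'method'
# argument that heuristic_search takes in the example above.
from functools import partial

SOLVERS = {
    'breadth': breadth_first_search,
    'depth': depth_first_search,
    'best': partial(heuristic_search, method='best'),
    'astar': partial(heuristic_search, method='astar'),
}

solver = SOLVERS.get(method)
if solver is None:
    print('Usage: python bw.py <method> <input filename> <output filename>')
else:
    solution = solver(current_state=initial_state,
                      goal_state=goal_state,
                      timeout=300)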
Example #4
	def depth_first_search(self, root=None):
		"""
		Depth-first search.
		
		@type  root: node
		@param root: Optional root node (will explore only root's connected component)

		@rtype:  tuple
		@return: A tuple containing a dictionary and two lists:
			1. Generated spanning tree
			2. Graph's preordering
			3. Graph's postordering
		"""
		return searching.depth_first_search(self, root)
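A brief usage sketch of the optional root argument, assuming a python-graph-style object gr whose components are {'a', 'b'} and {'c'} (the graph and node names are hypothetical):

# Rooting the search at 'a' explores only that connected component,
# so 'c' appears in neither returned ordering.
st, pre, post = gr.depth_first_search(root='a')
assert 'c' not in pre and 'c' not in post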
Example #5
    def find_path(self):
        filt = find("t")
        st, _, _ = depth_first_search(self.graph, "s", filter=filt)
        if "t" not in st:
            return None
        else:
            path = []
            sink = "t"

            # build up path from spanning tree returned from dfs
            while sink != "s":
                src = st[sink]
                new_edge = (src, sink, self.graph.edge_weight((src, sink)))
                path.insert(0, new_edge)
                sink = src
            return path
Example #6
def find_path(graph, source):
    filt = find("t")
    st, po, _ = depth_first_search(graph, source, filter=filt)

    # no path from s -> source -> t exists
    if "t" not in st or not graph.has_edge(("s", source)):
        return None
    else:  # construct path from st (spanning tree)
        path = []
        sink = "t"
        while sink != source:
            src = st[sink]
            new_edge = (src, sink, graph.edge_weight((src, sink)))
            path.insert(0, new_edge)
            sink = src
        source_edge = ("s", source, graph.edge_weight(("s", source)))
        path.insert(0, source_edge)
        return path
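Both find_path variants rebuild the path by walking backwards from the sink, because depth_first_search returns the spanning tree as a child-to-parent dictionary. A stripped-down sketch of that walk, using a toy dictionary and no edge weights:

# Toy spanning tree: each key's value is its parent in the DFS tree.
st = {'t': 'b', 'b': 'a', 'a': 's'}

node, path = 't', []
while node != 's':
    path.insert(0, (st[node], node))  # prepend, so the path ends up source-first
    node = st[node]

print(path)  # [('s', 'a'), ('a', 'b'), ('b', 't')]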
Example #7
def run(in_graph_file):
    # converts a graph in an acceptable form into a representation
    # that is usable in MapReduce
    mr_file_name = "mr_max_flow.txt"
    original_graph = file_to_graph(in_graph_file)
    mr_graph = mr_graph_convert(original_graph)
    dict_to_graph_file(mr_graph, mr_file_name)

    augmented_edges = {}

    # counters to keep track of convergence of MapReduce jobs
    converge_count = 5
    previous_count = -1

    while converge_count != 0:
        infile = open(mr_file_name, "r")

        # uncomment to run on emr
        # mr_job = max_flow.MRFlow(args=['-r', 'emr'])

        mr_job = max_flow.MRFlow()
        mr_job.stdin = infile

        with mr_job.make_runner() as runner:
            # perform iteration of MapReduce
            runner.run()

            # process map reduce output
            out_buffer = []
            for line in runner.stream_output():
                # print str(counter) + ": " + line
                key, value = extract_key_value(line)

                if key == "A_p":
                    A_p = value
                    merge_edge_flow(augmented_edges, A_p)
                else:
                    out_buffer.append(line)

            # write map reduce output to file for next iteration
            outfile = open(mr_file_name, "w")
            for line in out_buffer:
                key, value = extract_key_value(line)
                value.append(A_p)
                # value.append(1)
                new_line = json.dumps(key) + "\t" + json.dumps(value) + "\n"
                outfile.write(new_line)

            # check for convergence
            move_counts = runner.counters()[0]["move"]
            if move_counts["source"] == previous_count:
                converge_count -= 1
            else:
                converge_count = 5
            previous_count = move_counts["source"]

        infile.close()
        outfile.close()

    # augment graph based on max flow
    # original_graph = dict_graph_to_python_graph(original_graph_dict)
    augmented_graph = augment_graph(original_graph, augmented_edges)

    # find cut
    spanning_tree, preordering, postordering = depth_first_search(
        augmented_graph, "s")
    min_cut = find_max_flow(original_graph, preordering)

    return min_cut, preordering
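The convergence test in run waits until the "source" move counter has been unchanged for five consecutive MapReduce iterations before stopping. The same pattern in isolation, as a sketch with a stubbed measurement function:

# Stop once the observed value has been stable for `patience` consecutive rounds.
def run_until_stable(measure, patience=5):
    remaining = patience
    previous = None
    while remaining != 0:
        current = measure()  # one iteration of the underlying job (stubbed here)
        if current == previous:
            remaining -= 1  # still stable: one step closer to stopping
        else:
            remaining = patience  # value moved: reset the countdown
        previous = current
    return previous

# Example: a measurement that settles at 7 after two changing rounds.
values = iter([3, 5, 7, 7, 7, 7, 7, 7, 7])
print(run_until_stable(lambda: next(values)))  # 7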