def test_do_not_find_unconnected_element(self):
    """ Does an unconnected node remain unexplored? """
    # Adjacency list of graph G
    G = {}
    G[0] = [1, 2]
    G[1] = [0, 3]
    G[2] = [0, 3, 4]
    G[3] = [1, 2, 4, 5]
    G[4] = [2, 3, 5]
    G[5] = [4, 5]
    G[6] = [7]
    G[7] = [6]
    # Start node
    s = 0
    exploredList = DFS.DFS(G, s)
    expExploredList = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1,
                       5: 1, 6: 0, 7: 0}  # i.e. 6 & 7 unexplored
    self.assertEqual(expExploredList, exploredList)
def main():
    Adj_List = DFS.Construct_data_array(connections_data)
    Cordinate_List = DFS.Construct_data_array(location_data)
    sorted_adj_list = DFS.Sort_Adjency_List(Adj_List)
    """
    print("Adjacency List:")
    for item in Adj_List:
        print(item)
    print("Sorted Adjacency List")
    for item in sorted_adj_list:
        print(item)
    """
    """
    print("TESTS")
    print(DFS.Get_Connections("A1", sorted_adj_list))
    print(DFS.Get_Locations("A1", Cordinate_List))
    print(DFS.Get_Index("A1", sorted_adj_list))
    print(DFS.distance_calc("A1", "A2", Cordinate_List))
    """
    """
    print("Coordinate List:")
    for item in Cordinate_List:
        print(item)
    """
    path = DFS.DFS("C1", "B1", sorted_adj_list, DFS.Get_Connections)
    if path is False:
        print("Path not found")
    else:
        print("\n")
        DFS.PrintPathStack(path, Cordinate_List)
def test_find_connected_elements(self):
    """ Is a connected node explored? """
    # Adjacency list of graph G
    G = {}
    G[0] = [1, 2]
    G[1] = [0, 3]
    G[2] = [0, 3, 4]
    G[3] = [1, 2, 4, 5]
    G[4] = [2, 3, 5]
    G[5] = [4, 5]
    # Start node
    s = 0
    exploredList = DFS.DFS(G, s)
    expExploredList = {0: 1, 1: 1, 2: 1,
                       3: 1, 4: 1, 5: 1}  # i.e. all explored
    self.assertEqual(expExploredList, exploredList)
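# A minimal sketch of the DFS.DFS(G, s) function the two tests above assume:
# it walks graph G (an adjacency dict) from start node s and returns a dict
# mapping every node to 1 (explored) or 0 (unexplored). The actual module
# under test may differ; this is only an illustration of the expected contract.
def DFS(G, s):
    explored = {node: 0 for node in G}
    stack = [s]
    while stack:
        node = stack.pop()
        if explored[node]:
            continue
        explored[node] = 1
        # push unexplored neighbours; visit order does not affect the result
        stack.extend(n for n in G[node] if not explored[n])
    return explored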
def test(matrix):
    V = [Graph.Node(i) for i in range(0, len(matrix))]
    E = [[] for i in range(0, len(matrix))]
    G = Graph.Graph(V, E)
    Et = [[] for i in range(0, len(matrix))]
    Vt = [Graph.Node(i) for i in range(0, len(matrix))]
    Gt = Graph.Graph(Vt, Et)
    for i in range(0, len(matrix)):
        for j in range(0, len(matrix)):
            if matrix[i][j] == 1:
                E[i].append(Graph.Arch(i, j))
                V[i].addAdj(V[j])
                Et[j].append(Graph.Arch(j, i))
                Vt[j].addAdj(Vt[i])
    DFS.DFS(G)
    sccList = [[] for i in range(0, len(Gt.V))]
    DFS.SCC(Gt, sccList)
    count = 0
    length = []
    for i in range(0, len(sccList)):
        if len(sccList[i]) != 0:
            count += 1
            length.append(len(sccList[i]))
            # print("SCC[" + str(i) + "] : " + str(sccList[i]))
    print("There are " + str(count) + " strongly connected components in total")
    print("With the following number of vertices inside each: ", length)
def run_DFS():
    N = 5
    vertices = [(0, 1), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (3, 4)]
    G = gr.Graph(N, vertices)
    G.create_adj_dict()
    start = 1
    adj_dict = G.get_adj_dict()
    dfs = dfs_search.DFS(adj_dict)
    path = dfs(start)
    return adj_dict, path
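# Hedged sketch of the dfs_search.DFS callable assumed above: constructed with
# an adjacency dict, and calling the instance with a start vertex returns the
# DFS visit order. The class name and behaviour are assumptions for
# illustration, not the actual dfs_search module.
class DFS:
    def __init__(self, adj_dict):
        self.adj = adj_dict

    def __call__(self, start):
        visited, order, stack = set(), [], [start]
        while stack:
            v = stack.pop()
            if v in visited:
                continue
            visited.add(v)
            order.append(v)
            stack.extend(n for n in self.adj.get(v, []) if n not in visited)
        return order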
def find_game_mode(self, game_mode):
    if game_mode == DFS_MODE:
        return DFS.DFS()
    elif game_mode == A_STAR_MODE:
        return A_STAR.A_STAR()
    elif game_mode == BFS_MODE:
        return BFS.BFS()
    raise Exception(
        "Attention: Only DFS, BFS and A-STAR modes are available currently.")
def aplica_profundidade():
    if sys.version_info[0] < 3:
        pathDoArquivo = tk.Open().show()
    else:
        pathDoArquivo = filedialog.askopenfilename()
    G = nx.read_gexf(pathDoArquivo)
    # list() keeps this working on networkx 2.x, where nodes() is a view
    G = dfs.DFS(G, list(G.nodes())[0])
    pathDoArquivo = pathDoArquivo.replace(".gexf", "_DFS.gexf")
    nx.write_gexf(G, pathDoArquivo)
    nx.draw(G)
    plt.show()
def getAlgo(type, Matrix, startX, startY, n, grid, size):
    if type == "DFS":
        print("DFS")
        result = DFS(Matrix, startX, startY, n - 1, n - 1, grid, n, size)
        return result
    if type == "BFS":
        return BFS(Matrix, startX, startY, n - 1, n - 1, grid, n, size)
    if type == "AStar":
        return AStar(Matrix, startX, startY, n - 1, n - 1, grid, n, size,
                     "Manhattan")
def FirstPlot():
    algo_name = "DFS search"
    input_data = []
    exec_time = []
    for n in range(100, 1100, 100):
        graphList = []
        graph = DFS.Graph(g=graphList)
        vertexList = []
        DFS.createGraph(graph, vertexList, n)
        vertexNum = random.randint(0, n - 1)
        # time.clock() was removed in Python 3.8; perf_counter() is the replacement
        start_time = time.perf_counter()
        DFS.DFS(graph)
        end_time = time.perf_counter()
        exec_time.append((end_time - start_time) * 1000)
        input_data.append(n)
    CreatePlot(input_data, exec_time, algo_name)
def plot_extensions(dataset_path, num_extensions):
    allpaths = DFS.DFS(dataset_path)
    p = Path(os.getcwd()).parent
    dataset_name = path_utilities.get_last_dir_from_path(dataset_path)
    write_path = os.path.join(p, "outputs/", dataset_name + "--output/")
    if not os.path.isdir(write_path):
        os.mkdir(write_path)
    # a list of all the file names (without the paths)
    filenames = []
    for path in allpaths:
        filenames.append(path_utilities.get_fname_from_path(path))
    filenames_no_ext, exts = remove_all_extensions(filenames)
    plot_extension_pie(exts, num_extensions, write_path, dataset_path)
def main():
    data = pd.read_csv("/Users/babak_khorrami/Downloads/soc-Epinions1.txt",
                       header=0, sep="\t")
    dt = np.array(data)
    nodes = set(dt[:, 0:2].ravel())
    g = Graph()
    for n in nodes:
        g.add_node(n)
    for i in range(dt.shape[0]):
        g.add_edge(dt[i, 0], dt[i, 1], 1)
    print(g.get_node_count())
    print("------ Graph Created -------")
    dfs_small = DFS(g)
    dfs_small.dfs(0)
    dfs_small.print_dfs()
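# Sketch of the Graph / DFS interface the snippet above relies on. The method
# names (add_node, add_edge, get_node_count, dfs, print_dfs) come from the call
# sites; the internals are assumptions for illustration only.
class Graph:
    def __init__(self):
        self.adj = {}

    def add_node(self, n):
        self.adj.setdefault(n, [])

    def add_edge(self, u, v, w):
        # directed weighted edge u -> v
        self.adj.setdefault(u, []).append((v, w))
        self.adj.setdefault(v, [])

    def get_node_count(self):
        return len(self.adj)


class DFS:
    def __init__(self, g):
        self.g = g
        self.order = []

    def dfs(self, start):
        visited, stack = set(), [start]
        while stack:
            u = stack.pop()
            if u in visited:
                continue
            visited.add(u)
            self.order.append(u)
            stack.extend(v for v, _ in self.g.adj.get(u, []) if v not in visited)

    def print_dfs(self):
        print(self.order)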
def shuffle(dataset_path):
    if confirm(prompt="Warning, this will scramble the directory " +
               "structure of all files and folders in " + dataset_path +
               ". Are you sure you want to do this? "):
        print("Ok.")
    else:
        exit()
    if confirm(prompt="Really sure, though?"):
        print("Ok.")
    else:
        exit()
    if confirm(prompt="Super duper sure???"):
        print("Ok.")
    else:
        exit()
    # get a list of the paths to every file in the dataset
    # rooted at "dataset_path"
    filepaths = DFS.DFS(dataset_path)
    num_files = len(filepaths)
    # list of the parent directories of every file in "filepaths"
    directory_list = []
    # for each file
    for filepath in filepaths:
        # get its parent directory
        directory = remove_path_end(filepath)
        # and add it to our list of parent directories
        directory_list.append(directory)
    # generate a permutation of the number of files
    perm = np.random.permutation(num_files)
    # for each index
    for i in range(num_files):
        # get the image of the index under our permutation
        permuted_index = perm[i]
        # get the file we're moving
        next_file = filepaths[i]
        # get the randomly chosen destination directory
        dest_dir = directory_list[permuted_index]
        # move the file
        print(next_file)
        os.system("mv " + next_file + " " + dest_dir)
def runSearchAlgorithm(self, alg):
    if self.startNode is None or self.endNode is None:
        return
    if alg.get() == "DFS":
        dfs = DFS(self.g, self.startNode, self.endNode, GUI=self)
        dfs.run()
    elif alg.get() == "BFS":
        bfs = BFS(self.g, self.startNode, self.endNode, GUI=self)
        bfs.run()
    elif alg.get() == "A*":
        a_star = A_Star(self.g, self.startNode, self.endNode, GUI=self)
        a_star.run()
    elif alg.get() == "WA*":
        w = simpledialog.askinteger("Input", "Choose a weight for WA*",
                                    parent=self.win, minvalue=0, maxvalue=10000)
        wa_star = WA_Star(self.g, self.startNode, self.endNode, weight=w, GUI=self)
        wa_star.run()
    else:
        dijkstra = Dijkstra(self.g, self.startNode, self.endNode, GUI=self)
        dijkstra.run()
def main():
    a = SemFeature()
    user_counter = 0
    for user in a.docs:
        user_counter += 1
        print("\rUser_%i" % user_counter, user, "creating concept pair...")
        tagged_corpus = a.posTagging(a.docs[user])
        target_corpus = a.parseTag2Rel(tagged_corpus)
        pair = a.createConceptPair(a.docs[user], target_corpus)
        print("Loading graph...")
        time_0 = time.time()
        G = nx.read_gpickle('cn.pkl')  # create_graph()
        print("Complete in %fs" % (time.time() - time_0))
        dfs = DFS(G)
        for ps in pair:
            # t0 = time.time()
            for p in ps:
                t0 = time.time()
                print("find (%s %s)" % (p[0], p[1]))
                dfs.find(p[0], p[1])
                print("Search time: %fs" % (time.time() - t0))
            input()
def aplica_todos():
    if sys.version_info[0] < 3:
        pathDoArquivo = tk.Open().show()
    else:
        pathDoArquivo = filedialog.askopenfilename()
    G = nx.read_gexf(pathDoArquivo)
    M = krl.Kruskal(G)
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_MST_Kruskal.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)
    M = pr.Prim(G, list(G.nodes())[0])
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_MST_Prim.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)  # write the Prim MST, not the original G
    M = bfs.BFS(G, list(G.nodes())[0])
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_BFS.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)
    M = dfs.DFS(G, list(G.nodes())[0])
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_DFS.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)
    M = dks.Dijkstra(G, list(G.nodes())[0])
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_Dijkstra.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)
    M = wp.WelshPowell(G, list(G.nodes())[0])
    pathDoArquivoNovo = pathDoArquivo.replace(".gexf", "_WelshPowell.gexf")
    nx.write_gexf(M, pathDoArquivoNovo)
for j in range(itr):
    count += 1
    print(count)
    Matrix = Utility.matrixGenerator(n, p)
    gridObj = GridGenerator(Matrix, "DFS")
    grid = []  # gridObj.generate_grid(n, size)
    # gridObj1 = GridGenerator(Matrix, "BFS")
    # grid1 = gridObj1.generate_grid(n, size)
    # gridObj2 = GridGenerator(Matrix, "AStar Euclidean")
    # grid2 = gridObj2.generate_grid(n, size)
    # gridObj3 = GridGenerator(Matrix, "AStar Manhatten")
    # grid3 = gridObj3.generate_grid(n, size)
    # print(Matrix)
    print("DFS")
    dfsNodesExp = []
    analyseDFS = DFS(Matrix, startX, startY, n - 1, n - 1, grid, n, size, p).solve().to_dict()
    if analyseDFS['Maze Solved']:
        print("Maze solved DFS: ", analyseDFS['Maze Solved'])
        plist.append(analyseDFS['Probability'])
        dfsNodesExp.append(analyseDFS['Nodes Explored'])
        analyzerObjectDFS.append(analyseDFS)
    bfsNodesExp = []
    analyseBFS = BFS(Matrix, startX, startY, n - 1, n - 1, grid, n, size, p, False).solve().to_dict()
    if analyseBFS['Maze Solved']:
        print("Maze solved BFS: ", analyseBFS['Maze Solved'])
        bfsNodesExp.append(analyseBFS['Nodes Explored'])
        analyzerObjectBFS.append(analyseBFS)
    # nodesExpDFS = []
    # nodesExpDFS.append(DFS(Matrix, startX, startY, n - 1, n - 1, grid, n, size, p).solve().to_dict()['Nodes Explored'])
    # print("BFS")
from Node import *
from DFS import *
from BFS import *
from config import originate, target
import time

if __name__ == '__main__':
    Node1 = Node(None, originate, 0)
    Node2 = Node(None, target, 0)
    DFS = DFS(Node1, Node2, 10, 3)
    BFS = BFS(Node1, Node2, 10, 3)
    a_star = a_star(Node1, Node2, 10, 3)
    # depth-first search
    start_d = time.time()
    flag_d = DFS.search()
    end_d = time.time()
    cost_d = end_d - start_d
    # breadth-first search
    start_b = time.time()
    flag_b = BFS.search()
    end_b = time.time()
    cost_b = end_b - start_b
    if flag_d:
        print('The result of DFS')
        DFS.showLine()
        print('Spent time: %f s\n\n' % cost_d)
    else:
        print('error')
def main():
    a = SemFeature()
    user_counter = 0
    word_count = np.load('wordcount.npy')[()]
    loc_dict = np.load('loc_dict.npy')[()]
    # print("Loading graph...")
    # time_0 = time.time()
    # G = nx.read_gpickle('cn.pkl')  # create_graph()
    # dfs = DFS(G)
    # print("Complete in %fs" % (time.time() - time_0))
    # print(len(a.docs))
    for user in a.docs:
        print("Loading graph...")
        time_0 = time.time()
        G = nx.read_gpickle('cn.pkl')  # create_graph()
        dfs = DFS(G)
        print("Complete in %fs" % (time.time() - time_0))
        print("Creating %s tfidf dict..." % user)
        corpus = a.docs[user]
        tfidf = {}
        text = []
        [text.extend(el) for el in corpus]
        local_count = Counter(text)
        for word in local_count.keys():
            tf = float(local_count[word]) / word_count[word]
            # default to an empty list so len() is safe for unseen words
            idf = math.log10(42.0 / (1 + len(loc_dict.get(word, []))))
            score = tf * idf
            tfidf[word] = (1 + score)
        # print(tfidf)
        user_counter += 1
        print("\rUser_%i" % user_counter, user, "creating concept pair...")
        tagged_corpus = a.posTagging(a.docs[user])
        target_corpus = a.parseTag2Rel(tagged_corpus)
        pair = a.createConceptPair(a.docs[user], target_corpus)
        num = 0.0
        for ps in pair:
            # t0 = time.time()
            num += 1
            a.flush_print(num, len(pair))
            # print(num, "/", tot)
            for p in ps:
                t0 = time.time()
                # print("find (%s %s)" % (p[0], p[1]))
                path = dfs.find(p[0], p[1])
                # print("Found :", path)
                # print("Search time: %fs" % (time.time() - t0))
                if len(path) > 0:
                    new_weight = tfidf.get(path[0], 1.0)
                else:
                    continue
                neigh = G.neighbors(p[0])
                # Diffuse
                for n in neigh:
                    G[p[0]][n][0]['weight'] *= new_weight
                # Infiltrate
                for i in range(len(path)):
                    if i == len(path) - 1:
                        break
                    # print("%s new_w : %f" % (path[i], new_weight))
                    G[path[i]][path[i + 1]][0]['weight'] *= new_weight
                # raw_input()
        print("Save %s graph to pkl..." % user)
        nx.write_gpickle(G, "Graph/mix/update_%s.graph" % user)
        print("Save complete!")
from DFS import *

if __name__ == '__main__':
    with open("inputFile.txt", "r") as fp:
        data = fp.readlines()
    for line in data:
        word = line.split()
        G[word[0]].append(word[1])
    DFS()
    x_sorted = sorted(finished.items(), key=lambda y: y[1], reverse=True)
    answer = ''
    for i in x_sorted:
        answer += i[0]
    print('The topological sort of the given graph is: \t', answer)
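# The script above star-imports G, DFS and finished from its DFS module. A
# minimal sketch of what that module could look like (an assumption, not the
# original code): G is a defaultdict adjacency list, DFS() records a finishing
# time for every node, and sorting by descending finish time yields a
# topological order for a DAG.
from collections import defaultdict

G = defaultdict(list)
finished = {}


def DFS():
    time = [0]
    visited = set()

    def visit(u):
        visited.add(u)
        for v in G[u]:
            if v not in visited:
                visit(v)
        # a node finishes after all of its descendants
        time[0] += 1
        finished[u] = time[0]

    # snapshot the keys: visiting may add sink nodes to the defaultdict
    for node in list(G):
        if node not in visited:
            visit(node)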
print("No path found using tree search!") else: print("Path:", result_node.path()) print("Path Cost:", result_node.path_cost) print("Solution:", result_node.solution()) print("Nodes searched with BFS:", myBFSSearch.nodesSearched) print("Time Spent with BFS:", myBFSSearch.timeSpent) print("==============") print("DEPTH FIRST SEARCH") print("\x1b[0;30;41m" + "NOTE -- NEEDS TO BE IMPLEMENTED -- CURRENTLY BFS" + "\x1b[0m") # search using DFS Search myDFSSearch = DFS(eight_puzzle) result_node = myDFSSearch.search() if (result_node is None): print("No path found using DFS search!") else: print("Path:", result_node.path()) print("Path Cost:", result_node.path_cost) print("Solution:", result_node.solution()) print("Nodes searched with DFS:", myDFSSearch.nodesSearched) print("Time Spent with DFS:", myDFSSearch.timeSpent) print("==============") print("==============") print("ITERATIVE DEEPENING SEARCH")
while endNode == startNode:
    print("End node must not be start node")
    startNode = input("What is your start node? (0 to " + str(g.nodesWide * g.nodesTall - 1) + ")")
    startNode = int(startNode)
    endNode = input("What is your end node? (0 to " + str(g.nodesWide * g.nodesTall - 1) + ")")
    endNode = int(endNode)
alg = input("What algorithm do you want to use? (1: BFS, 2: DFS, 3: Dijkstra's, 4: A*, 5: WA*)")
while alg not in ["1", "2", "3", "4", "5"]:
    alg = input("What algorithm do you want to use? (1: BFS, 2: DFS, 3: Dijkstra's, 4: A*, 5: WA*)")
if alg == "1":
    bfs = BFS(g, startNode, endNode)
    bfs.run()
elif alg == "2":
    dfs = DFS(g, startNode, endNode)
    dfs.run()
elif alg == "3":
    dijkstra = Dijkstra(g, startNode, endNode)
    dijkstra.run()
elif alg == "4":
    a_star = A_Star(g, startNode, endNode)
    a_star.run()
elif alg == "5":
    w = input("What weight would you like to use for WA* (1 to 10000)")
    w = int(w)
    while w < 1 or w > 10000:
        print("Weight must be between 1 and 10000")
        w = input("What weight would you like to use for WA* (1 to 10000)")
        w = int(w)
    wa_star = WA_Star(g, startNode, endNode, weight=w)
def shuffle(dataset_path, shuffle_ratio, warning, old_shuffle_tracker, filepaths):
    if warning:
        if confirm(prompt="Warning, this will scramble the directory " +
                   "structure of all files and folders in " + dataset_path +
                   ". Are you sure you want to do this? "):
            print("Ok.")
        else:
            exit()
        if confirm(prompt="Really sure, though?"):
            print("Ok.")
        else:
            exit()
        if confirm(prompt="Super duper sure???"):
            print("Ok.")
        else:
            exit()
    # get a list of the paths to every file in the dataset
    # rooted at "dataset_path"
    new_filepaths = DFS.DFS(dataset_path)
    num_files = len(new_filepaths)
    print("Number of files: ", num_files)
    while len(old_shuffle_tracker) < len(filepaths):
        old_shuffle_tracker.append(0)
    # we randomly shuffle the list of filepaths
    num_to_shuffle = math.floor(num_files * shuffle_ratio)
    print(num_to_shuffle)
    # only shuffle part of the dataset
    paths_to_shuffle = new_filepaths[0:num_to_shuffle]
    # generate a permutation of the number of files
    perm = np.random.permutation(num_to_shuffle)
    perm2 = np.random.permutation(num_to_shuffle)
    # "num_to_shuffle" randomly chosen parent directories
    directory_list = []
    # for each file
    for i in range(num_to_shuffle):
        # get the image of the index under our permutation
        permuted_index = perm[i]
        # get its parent directory
        directory = remove_path_end(new_filepaths[permuted_index])
        # and add it to our list of parent directories
        directory_list.append(directory)
    # moves a random file somewhere in "directory_list"
    for i in range(num_to_shuffle):
        # get the image of the index under our permutation
        permuted_index2 = perm2[i]
        # get the file we're moving
        next_file = "iiiiiiiiiiiiiiiiii"
        files_checked = 0
        while old_shuffle_tracker[permuted_index2] == 1:
            next_file = new_filepaths[permuted_index2]
            files_checked += 1
            if files_checked > 2000:
                break
        # get the randomly chosen destination directory
        dest_dir = directory_list[i]
        # move the file, only if dest dir isn't parent of next_file
        if remove_path_end(next_file) != dest_dir:
            os.system("mv \"" + next_file + "\" \"" + dest_dir + "\"")
    # create shuffle tracker
    shuffled_DFS = DFS.DFS(dataset_path)
    shuffle_tracker = []
    for i in range(min([len(shuffled_DFS), len(filepaths)])):
        if shuffled_DFS[i] != filepaths[i]:
            shuffle_tracker.append(1)
        else:
            shuffle_tracker.append(old_shuffle_tracker[i])
    return shuffle_tracker
def openDFS():
    dfs = DFS.DFS()
    create_window(dfs, "dfs")
def main(): print("ARGUMENTS: ") args = load_arguments() print("Arguments loaded. ") dataset_path = args.dataset_path dest = os.path.join(dataset_path, "../") num_clusters = args.num_clusters num_top_exts = args.num_extensions num_processes = args.num_processes overwrite_dist = 'y' overwrite_plot = 'y' fill_threshold = 0.4 # check if destination is valid, get its absolute path check_valid_dir(dest) dest = os.path.abspath(dest) # check if dataset is valid, get its absolute path check_valid_dir(dataset_path) dataset_path = os.path.abspath(dataset_path) # the name of the top-level directory of the dataset dataset_name = get_last_dir_from_path(dataset_path) # define the write path for the entire program write_path = "../../cluster-datalake-outputs/" + dataset_name + "--output/" if not os.path.isdir(write_path): os.system("mkdir " + write_path) print("All results printing to " + write_path) # get absolute path write_path = os.path.abspath(write_path) # write results to a text file f = open(os.path.join(write_path, 'shuffle_test_' + dataset_name + '.txt'), 'w') f.write("shuffle_ratio" + "," + "freqdrop_score" + "," + "silhouette_score" + "," + "naive_tree_dist_score" + "," + "\n") #=================================================================== #=#: Shuffle and cluster, recording the ensemble score. #=================================================================== shuffle_tracker = [] # get a list of the paths to every file in the dataset # rooted at "dataset_path" filepaths = DFS.DFS(dataset_path) # generate path to the new root of our test dataset shuffled_dataset_name = "shuffled_" + dataset_name shuffled_dataset_path = os.path.join(dest, shuffled_dataset_name) print("clustering: ", shuffled_dataset_path) # copy dataset to this new location os.system("cp -r " + dataset_path + " " + shuffled_dataset_path) # we gradually increase the proportion of the test dataset # which is shuffled shuffle_ratio = 0.0 while shuffle_ratio <= 1.0: # define the write path for the entire program write_path = "../../cluster-datalake-outputs/" + shuffled_dataset_name + "--output/" # get converted file location and output location out_dir = os.path.join(shuffled_dataset_path, "../" + "converted-" + shuffled_dataset_name) if not os.path.isdir(write_path): os.system("mkdir " + write_path) if not os.path.isdir(out_dir): os.system("mkdir " + out_dir) csv_path = os.path.join(out_dir, "csv/") if not os.path.isdir(csv_path): os.system("mkdir " + csv_path) txt_path = os.path.join(out_dir, "txt/") if not os.path.isdir(txt_path): os.system("mkdir " + txt_path) # shuffle and convert the test dataset shuffle_tracker = shuffle(shuffled_dataset_path, shuffle_ratio, False, shuffle_tracker, filepaths) DFS.extension_indexer(shuffled_dataset_path, num_top_exts, write_path) # cluster the shuffled test dataset scores = schema_clustering.runflow(shuffled_dataset_path, num_clusters, overwrite_dist, overwrite_plot, fill_threshold) # print results print("Shuffle ratio: ", shuffle_ratio, "Freqdrop score: ", scores[0], "Silhouette score: ", scores[1], "Naive score: ", scores[2]) f.write( format(shuffle_ratio, '.3f') + "," + format(scores[0], '.3f') + "," + format(scores[1], '.3f') + "," + format(scores[2], '.3f') + "," + '\n') # delete the shuffled dataset, outputs, and converted files os.system("rm -r " + write_path) os.system("rm -r " + out_dir) shuffle_ratio += args.step f.close() return
def get_components_DFS(self):
    tmp = DFS.DFS(copy.deepcopy(self))
    return tmp.DFS_components()
import DFS
import BFS
import ASTAR_MHT
import ASTAR_EUC
import sys
import time
import Start

if __name__ == "__main__":
    print("script_name", sys.argv[0])
    for i in range(1, len(sys.argv)):
        print("argument", i, sys.argv[i])
    print('start initialize')
    # set the size and density of this matrix
    size = 10
    start = Start.Start(size, 0.3)
    # start.print_matrix()
    start.paint_random()
    # init all the algorithms
    dfs = DFS.DFS()
    bfs = BFS.BFS()
    a_mht = ASTAR_MHT.ASTAR()
    a_euc = ASTAR_EUC.ASTAR()
    print('start run')
    print("DIM, T_DFS, T_BFS, T_MHT, T_EUC")
    while 1:
        print(size, end=" ")
        start = Start.Start(size, 0.3)
        start.paint_random()
        while dfs.dfs_route(start.get_matrix(), size)[0] == 0:
            start.paint_random()
        # set timer for each algorithm
        start_time = time.perf_counter()
        # DFS
        dfs.dfs_route(start.get_matrix(), size)
#         path.append(neigh[max_idx])
#         DFS_search(G, neigh[max_idx], target, path, depth - 1)

print("Loading graph...")
time_0 = time.time()
G = nx.read_gpickle('ConceptNet.pkl')  # create_graph()
# G = nx.read_gml('ConceptNett.xml')
print("Complete! ", time.time() - time_0)
print("Load corpus...")
# corpus = np.load('../../../divisi/update_cn/corpus/user/jaxsk_corpus.npy')
corpus = np.load("user/1000/articles_mass.npy")
# time_st = time.time()
dfs = DFS(G)
# node_list = G.nodes()
# dfs.find('show', 'glee')
counter = 0
for sentence in corpus:
    # raw_input()
    counter += 1
    print(counter)
    for i in range(len(sentence)):
        # raw_input()
        time_st = time.time()
        if i == len(sentence) - 2:
            break
        print("Finding (", sentence[i], sentence[i + 1], ")")
from AStarAlgo import *
from DFS import *
from BFS import *
from IDFS import *
from GreadySearch import *
from GenerateState import GenerateState
from Node import Node
import numpy as np
import time

a = AStarAlgo()
d = DFS()
b = BFS()
i = IDFS()
g = GreadySearch()


def batch(state, depth=10):
    # A* Search.
    start = time.time()
    sol = a.a_star_algorithm(state)
    print(f'Time: {time.time() - start}')
    print(sol)
    time.sleep(5)
    # Greedy Search.
    start = time.time()
    sol = g.gready_search_algorithm(state)  # capture the greedy result, not A*'s
    print(f'Time: {time.time() - start}')
    print(sol)
from DFS import *

# Defining vertexes
VERTEX1 = Vertex(1)
VERTEX2 = Vertex(2)
VERTEX3 = Vertex(3)
VERTEX4 = Vertex(4)
VERTEX5 = Vertex(5)

# Defining neighbours for those vertexes
VERTEX1.neighbour_list.append(VERTEX2)
VERTEX1.neighbour_list.append(VERTEX3)
VERTEX3.neighbour_list.append(VERTEX4)
VERTEX4.neighbour_list.append(VERTEX5)

vertex_list = list()
vertex_list.append(VERTEX1)
vertex_list.append(VERTEX2)
vertex_list.append(VERTEX3)
vertex_list.append(VERTEX4)
vertex_list.append(VERTEX5)

dfs = DFS()
dfs.dfs(vertex_list)
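# Minimal sketch of the Vertex and DFS classes the example above assumes
# (illustrative only; the real DFS module may differ): each Vertex keeps a
# neighbour_list and a visited flag, and DFS.dfs() restarts the traversal from
# every unvisited vertex so disconnected vertices like VERTEX2 are still covered.
class Vertex:
    def __init__(self, value):
        self.value = value
        self.neighbour_list = []
        self.visited = False


class DFS:
    def dfs(self, vertex_list):
        for vertex in vertex_list:
            if not vertex.visited:
                self._visit(vertex)

    def _visit(self, vertex):
        vertex.visited = True
        print(vertex.value)
        for neighbour in vertex.neighbour_list:
            if not neighbour.visited:
                self._visit(neighbour)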
    print('The number of arguments is incorrect')
strategyName = argv[1]
strategyParameter = argv[2]
file = FileManager(argv[3])
solutionFilename = argv[4]
infoFilename = argv[5]
file.readFile()
solution = GenerateSolution(file.rowNumber, file.colNumber)
if strategyName == 'bfs':
    solver = BFS(file.array, solution, strategyParameter)
elif strategyName == 'dfs':
    solver = DFS(file.array, solution, strategyParameter)
elif strategyName == 'astr':
    solver = AStar(file.array, solution, strategyParameter)
else:
    print('Invalid strategy name')
    exit()
path, visited, processed, maxDepth, solvingTime = solver.solve()
print(path)
if path != -1:
    file.writeSolution(solutionFilename, path)
    file.writeInfo(infoFilename, len(path), visited, processed, maxDepth,
                   solvingTime)
else:
    file.writeNotFound(solutionFilename)
    file.writeInfo(infoFilename, path, visited, processed, maxDepth,
                   solvingTime)