def _boilerplate_test(self, item_list, parent_of_new_item, to_be_inserted_item, desired_input_bfs_list, desired_input_dfs_list, desired_output_bfs_list, desired_output_dfs_list):
    """Common driver for insert tests.

    Builds the tree from *item_list*, traverses it (BFS then DFS), performs
    the insert, traverses again, and compares the captured output against
    the four expected traversal lists.
    """
    with captured_output() as (captured, _err):
        tree, item_dict = make_nary_tree_with_dict(item_list)
        bfs(tree)
        dfs(tree)
        if parent_of_new_item is None:
            # No parent given: the inserted item becomes the new root.
            tree = insert(None, to_be_inserted_item)
        else:
            insert(item_dict[parent_of_new_item], to_be_inserted_item)
        bfs(tree)
        dfs(tree)
    actual = captured.getvalue().strip()
    expected = (' ' + linesep).join(
        cook_string_for_list(expected_list)
        for expected_list in (desired_input_bfs_list,
                              desired_input_dfs_list,
                              desired_output_bfs_list,
                              desired_output_dfs_list))
    self.assertEqual(expected.strip(), actual)
def _test_operation_insert(self, tree, parent_of_new_item, item, item_dict, desired_output_bfs_list, desired_output_dfs_list):
    """Perform an insert and check the traversal output it produces.

    Registers the new node in *item_dict* and returns the (possibly new)
    tree root so callers can chain further operations.
    """
    new_node = None
    with captured_output() as (captured, _err):
        if parent_of_new_item is None:
            # No parent given: the inserted node becomes the new root.
            new_node = insert(None, item)
            tree = new_node
        else:
            new_node = insert(item_dict[parent_of_new_item], item)
        item_dict[item] = new_node
        bfs(tree)
        dfs(tree)
    actual = captured.getvalue().strip()
    expected = (' ' + linesep).join(
        cook_string_for_list(expected_list)
        for expected_list in (desired_output_bfs_list,
                              desired_output_dfs_list))
    self.assertEqual(expected.strip(), actual)
    return tree
def _boilerplate_test(self, item_list, to_be_deleted_item, desired_input_bfs_list, desired_input_dfs_list, desired_output_bfs_list, desired_output_dfs_list):
    """Common driver for delete tests.

    Builds the tree from *item_list*, traverses it (BFS then DFS), performs
    the delete, traverses again, and compares the captured output against
    the four expected traversal lists.
    """
    with captured_output() as (captured, _err):
        tree, item_dict = make_nary_tree_with_dict(item_list)
        bfs(tree)
        dfs(tree)
        if to_be_deleted_item is None:
            tree = delete(None)
        else:
            tree = delete(item_dict[to_be_deleted_item])
        bfs(tree)
        dfs(tree)
    actual = captured.getvalue().strip()
    expected = (' ' + linesep).join(
        cook_string_for_list(expected_list)
        for expected_list in (desired_input_bfs_list,
                              desired_input_dfs_list,
                              desired_output_bfs_list,
                              desired_output_dfs_list))
    self.assertEqual(expected.strip(), actual)
def debug():
    """Ad-hoc manual check: build a single-node tree, insert one child, traverse."""
    from traverse import bfs, dfs
    tree, _item_dict = make_nary_tree_with_dict([(None, 1)])
    insert(tree, 2)
    bfs(tree)
    dfs(tree)
def auto_pilot(starting_room, target):
    """Navigate automatically: find a route to *target* via BFS, then follow it."""
    auto_move(bfs(starting_room, target))
def _test_operation_delete(self, item, item_dict, desired_output_bfs_list, desired_output_dfs_list):
    """Perform a delete and check the traversal output it produces.

    Removes the deleted item from *item_dict* and returns the new tree root.
    """
    tree = None
    with captured_output() as (captured, _err):
        if item is None:
            tree = delete(None)
        else:
            tree = delete(item_dict[item])
            del item_dict[item]
        bfs(tree)
        dfs(tree)
    actual = captured.getvalue().strip()
    expected = (' ' + linesep).join(
        cook_string_for_list(expected_list)
        for expected_list in (desired_output_bfs_list,
                              desired_output_dfs_list))
    self.assertEqual(expected.strip(), actual)
    return tree
def _print_average(label, results):
    """Print the mean time (index 1) and space (index 2) complexity of *results*.

    NOTE(review): true division is used deliberately — the original Python 2
    `/` truncated the averages when the complexity counters were ints.
    """
    total_time = sum(entry[1] for entry in results)
    total_space = sum(entry[2] for entry in results)
    print("Average %s Time Complexity" % label)
    print(total_time / len(results))
    print("Average %s Space Complexity" % label)
    print(total_space / len(results))


def _print_samples(header, results):
    """Print path / time / space for the first 10 entries of *results*."""
    print(header)
    for path, time_complexity, space_complexity in results[:10]:
        print("Path: ", path)
        print("Time Complexity: ", time_complexity)
        print("Space Complexity: ", space_complexity)


def _random_endpoints():
    """Return two distinct random node labels in 'A'..'Z'."""
    start_point = end_point = 0
    while start_point == end_point:
        start_point = chr(random.randint(0, 25) + ord('A'))
        end_point = chr(random.randint(0, 25) + ord('A'))
    return start_point, end_point


def main():
    """Benchmark A*/Greedy heuristics and BFS/DFS/IDDFS on 100 random maps each.

    For every algorithm it reports total execution time, number of invalid
    (unreachable) paths, average time/space complexity, and the first ten
    sample results.
    """
    # --- Heuristic benchmarks: A* and Greedy, each with Euclidean (1) and
    # Chebyshev (2) distance heuristics ---
    misses = 0
    valid_maps = 0
    a_star_euclidean = []
    a_star_chebyshev = []
    greedy_euclidean = []
    greedy_chebyshev = []
    start_time = time.time()
    # Generate maps until 100 of them yield a reachable goal.
    while valid_maps < 100:
        start_point, end_point = _random_endpoints()
        # Generate a graph and eliminate edges based on shortest Euclidean
        # distances, then build the weighted/unweighted views the searches need.
        map_value = graphgeneration.randomMapGeneration()
        euclidean_map = graphgeneration.euclideanMapModify(map_value)
        weighted_graph = graphgeneration.pruneEuclideanMap(euclidean_map)
        weighted_graph_mapping = graphgeneration.euclideanMapValues(map_value)
        unweighted_graph = graphgeneration.createUnweightedGrapth(weighted_graph)
        graph = Graph()
        graph.edges = unweighted_graph
        graph.weights = weighted_graph_mapping
        previous, _cost, a_star_time, a_star_space = heuristics.aStarSearch(
            graph, start_point, end_point, 1)
        path = heuristics.reconstructPath(previous, start_point, end_point)
        if path is None:
            # Goal unreachable on this map: count the miss and regenerate.
            misses += 1
            continue
        a_star_euclidean.append([path, a_star_time, a_star_space])
        previous, _cost, a_star_time, a_star_space = heuristics.aStarSearch(
            graph, start_point, end_point, 2)
        path = heuristics.reconstructPath(previous, start_point, end_point)
        a_star_chebyshev.append([path, a_star_time, a_star_space])
        previous, greedy_time, greedy_space = heuristics.greedyFirstSearch(
            graph, start_point, end_point, 1)
        path = heuristics.reconstructPath(previous, start_point, end_point)
        greedy_euclidean.append([path, greedy_time, greedy_space])
        previous, greedy_time, greedy_space = heuristics.greedyFirstSearch(
            graph, start_point, end_point, 2)
        path = heuristics.reconstructPath(previous, start_point, end_point)
        greedy_chebyshev.append([path, greedy_time, greedy_space])
        valid_maps += 1
    total_time = time.time() - start_time

    print("Total Execution Time")
    print(total_time)
    print("Total Number of Invalid Paths")
    print(misses)
    _print_average("A* Euclidean", a_star_euclidean)
    _print_average("A* Chebyshev", a_star_chebyshev)
    _print_average("Greedy Euclidean", greedy_euclidean)
    _print_average("Greedy Chebyshev", greedy_chebyshev)
    _print_samples("A* Euclidean Heuristic", a_star_euclidean)
    _print_samples("A* Chebyshev Heuristic", a_star_chebyshev)
    _print_samples("Greedy Euclidean Heuristic", greedy_euclidean)
    _print_samples("Greedy Chebyshev Heuristic", greedy_chebyshev)

    # --- Uninformed search benchmarks: BFS, DFS, IDDFS ---
    bfs_results = []
    dfs_results = []
    iddfs_results = []
    misses = 0
    valid_maps = 0
    start_time = time.time()
    while valid_maps < 100:
        start_point, end_point = _random_endpoints()
        # Generate a random map and remove edges based on shortest Euclidean
        # distance; the uninformed searches use the set-based unweighted view.
        map_value = graphgeneration.randomMapGeneration()
        euclidean_map = graphgeneration.euclideanMapModify(map_value)
        weighted_graph = graphgeneration.pruneEuclideanMap(euclidean_map)
        unweighted_graph = graphgeneration.createUnweightedSetGrapth(weighted_graph)
        path, bfs_time, bfs_space = traverse.bfs(
            unweighted_graph, start_point, end_point)
        if path is None:
            # Goal unreachable on this map: count the miss and regenerate.
            misses += 1
            continue
        bfs_results.append([path, bfs_time, bfs_space])
        path, dfs_time, dfs_space = traverse.dfs(
            unweighted_graph, start_point, end_point)
        dfs_results.append([path, dfs_time, dfs_space])
        path, iddfs_time, iddfs_space = traverse.id_dfs(
            unweighted_graph, start_point, end_point)
        iddfs_results.append([path, iddfs_time, iddfs_space])
        valid_maps += 1
    total_time = time.time() - start_time

    print("Total Execution Time")
    print(total_time)
    print("Total Number of Invalid Paths")
    print(misses)
    _print_average("BFS", bfs_results)
    _print_average("DFS", dfs_results)
    _print_average("IDDFS", iddfs_results)
    _print_samples("Breadth First Search", bfs_results)
    _print_samples("Depth First Search", dfs_results)
    _print_samples("Iterative Deepening First Search", iddfs_results)