def do(searcher, problem):
    """Run *searcher* on an instrumented copy of *problem*.

    Returns a pair: the InstrumentedProblem (so the caller can inspect
    search statistics) and the path cost of the goal node found.
    """
    instrumented = search.InstrumentedProblem(problem)
    goal = searcher(instrumented)
    return instrumented, goal.path_cost
# This should be commented out for larger tasks
# print task
print( "\n******************************START TEST******************************" )
tic = time.time()

# Define a sub-class of the Problem class, make an instance for the task and call the search
# You should then set the variables:
#   final_state - the final state at the end of the plan
#   plan - a list of actions representing the plan
#   cost - the cost of the plan
# Your planner_v2 here
p = search.InstrumentedProblem(PlanProblem(task))
# Alternative heuristics kept for comparison runs (uncomment one):
# soln = search.astar_search(p, lambda n: 0)
# soln = search.astar_search(p, lambda n: p.hmax(n))
# soln = search.astar_search(p, lambda n: p.hsum(n))
# soln = search.astar_search(p, lambda n: p.hff(n))
soln = search.astar_search(p, lambda n: p.h_g(n))
print('search stats', p)
if soln is None:
    print('no solution found')
    # FIX: exit() is an interactive convenience injected by the `site`
    # module and may be absent (e.g. under `python -S`); raising
    # SystemExit is the portable equivalent (same exit status 0).
    raise SystemExit
plan = soln.solution()
cost = soln.path_cost
final_state = soln.state
toc = time.time()
print('Gathering data for depth ' + str(depth) + '...')
# One (label, runner) pair per algorithm, in the original run order.
# Each runner takes an InstrumentedProblem and returns the goal node.
# DRY fix: replaces four copy-pasted instrument/search/record stanzas
# that differed only in the label and the search call.
searchers = [
    ('BFS', search.breadth_first_search),
    ('IDS', search.iterative_deepening_search),
    ('A*-mis', lambda p: search.astar_search(p, misplaced)),
    ('A*-Man', lambda p: search.astar_search(p, manhattan)),
]
path_lengths[depth] = {label: [] for label, _ in searchers}
state_counts[depth] = {label: [] for label, _ in searchers}
for trial in range(trials):
    puzzle = EightPuzzle(depth)
    for label, run in searchers:
        # Fresh instrumentation per algorithm so state counts are
        # attributed to exactly one search.
        p = search.InstrumentedProblem(puzzle)
        path_lengths[depth][label].append(len(run(p).path()))
        state_counts[depth][label].append(p.states)
# print("new route:",new_route) #self.state = new_route return new_route def cost(self, state): # arvuta (või leia muul viisil) praeguse marsruudi kogupikkus. Ära unusta, et marsruut on suletud. cost = 0 # print(self.matrix1[1][1]) for i in range(0, len(state) - 1): # print(i,"cost",self.matrix1[state[i]][state[i+1]]) cost = cost + int(self.matrix1[i][i + 1]) # print(cost) return cost def value(self, state): # kuna valmis otsingufunktsioonid arvavad, et mida suurem väärtus, seda parem, siis meie minimeerimisülesande TSP # lahendamiseks tuleb teepikkusest pöördväärtus võtta. return 1 / (self.cost(state) + 1) p = search.InstrumentedProblem(TSP("gr17")) g = search.simulated_annealing(p) #g = search.simulated_annealing(p, search.exp_schedule(limit=10000)) #g = search.hill_climbing(p) print("p", p) print("g", g) print(g.state) print(p.cost(g.state))