def hill(self):
    """Plain hill climbing for the N-queens problem.

    Starting from ``self.startState``, repeatedly samples a neighbor and
    accepts it when it does not increase the number of attacking queen
    pairs (``<=`` also allows sideways/plateau moves).  Stops after
    ``self.iterate`` iterations or when a zero-attack state is found.

    Side effects: updates ``self.startState`` and ``self.neighbor``,
    writes the final board via the project-level ``file`` module and
    prints a summary (Portuguese messages preserved verbatim).

    :return: number of attacking queen pairs in the final state.
    """
    # NOTE: converted from Python 2 `print` statements to `print()` calls;
    # comma-separated arguments produce the same space-separated output.
    currentState = self.startState
    nextEval = Heuristic(currentState).attacks()
    i = 0
    while i < self.iterate and nextEval != 0:
        newState = self.neighbor.generateState()
        currentEval = Heuristic(newState).attacks()
        if self.update_states:
            print(Heuristic(currentState).queensPosition(), " -> ",
                  Heuristic(newState).queensPosition())
        # Accept neighbors that are no worse (allows plateau moves).
        if currentEval <= nextEval:
            currentState = newState
            nextEval = Heuristic(currentState).attacks()
        i += 1
        # Re-seed the neighbor generator from the (possibly new) state.
        self.neighbor = Neighbor(currentState)
    file.write(Heuristic(currentState).queensPosition(),
               self.neighbor.createBoard(),
               url="./resource/newBoard.txt")
    print("Hill Comum > Iteracao : ", i)
    print("Posicao Inicial das ", len(self.startState), " rainhas : ",
          Heuristic(self.startState).queensPosition())
    print("Posicao Final das ", len(self.startState), " rainhas : ",
          Heuristic(currentState).queensPosition())
    print("\tNumero de rainhas atacando : ",
          Heuristic(currentState).attacks())
    self.startState = currentState
    return Heuristic(currentState).attacks()
def MakePlan(Parameters):
    """Build a day plan with a constructive heuristic plus metaheuristics.

    Seeds an empty route matrix, runs ``Heuristic`` once, then repeatedly
    perturbs the best-known plan with MetaH1/MetaH2/MetaH3, keeping any
    plan that improves the objective.  Stops after MAXITERATIONS
    non-improving rounds or a 25-second wall-clock budget.

    :param Parameters: configuration object (``days``, ``timeMultiplier``…).
    :return: ``[bestPlan, Data]`` — the best plan found and the data object.
    """
    STARTTIME = time.time()
    # makedb()
    Data = DataClass(Parameters)
    # Route matrix seeded with synthetic depot indices (Data.n + 2*i + j).
    emptyRoute = np.empty((Data.DAYS, 2), dtype=int)
    for i in range(Parameters.days):
        for j in range(2):
            emptyRoute[i, j] = Data.n + 2 * i + j
    # FIX: `[[]] * Data.DAYS` created DAYS references to ONE shared list,
    # so an append for one day leaked into every day.  Build independent
    # lists instead.
    rmvd = [[] for _ in range(Data.DAYS)]
    Plan = PlanVariables(emptyRoute, Data)
    heuristicResponse = Heuristic(Plan, rmvd, Data, Parameters.timeMultiplier)
    newPlan = heuristicResponse[0]
    bestPlan = newPlan
    bestObjective = heuristicResponse[1]
    iterations = 0
    # 25-second time budget guards the outer improvement loop.
    while (iterations < MAXITERATIONS and (time.time() - STARTTIME) < 25):
        metaheu = 1
        while (metaheu <= 3):
            if metaheu == 1:
                meta = MetaH1(newPlan.route)
            if metaheu == 2:
                meta = MetaH2(newPlan.route)
            if metaheu == 3:
                meta = MetaH3(newPlan.route)
            Plan = PlanVariables(meta[0], Data)
            rmvd = meta[1]
            heuristicResponse = Heuristic(Plan, rmvd, Data,
                                          Parameters.timeMultiplier)
            newPlan = heuristicResponse[0]
            newObjective = heuristicResponse[1]
            if newObjective > bestObjective:
                # Improvement: restart the metaheuristic cycle and reset
                # the stagnation counter.
                bestObjective = newObjective
                bestPlan = newPlan
                metaheu = 1
                iterations = 0
            else:
                metaheu = metaheu + 1
                iterations = iterations + 1
    return [bestPlan, Data]
def __init__(self, color, weights, timeout=59.5, depth=4):
    """Set up the agent: game state, heuristic, caches and worker processes.

    :param color: side this agent plays.
    :param weights: heuristic weight vector passed to ``Heuristic``.
    :param timeout: per-move time budget in seconds.
    :param depth: search depth handed to the workers.
    """
    self.TIMEOUT = timeout
    self.DEPTH = depth
    self.COLOR = color
    self.NUM_WORKERS = cpu_count()
    self.game = Game()
    self.heuristic = Heuristic(weights)

    # Transposition table & history heuristic.
    self.tt = Cache(1e7)
    self.hh = Cache(1e7)

    self.cache_pipes = []
    self.sync_pipes = []
    self.jobs_queue = Queue(1)
    self.moves_queue = Queue()

    # Spawn one search worker per CPU core; each worker gets its own
    # duplex pipe pair for cache traffic and synchronization.
    self.search_workers = []
    for _ in range(self.NUM_WORKERS):
        worker_cache_end, parent_cache_end = Pipe(True)
        worker_sync_end, parent_sync_end = Pipe(True)
        self.sync_pipes.append(parent_sync_end)
        self.cache_pipes.append(parent_cache_end)
        worker = Process(target=self.search_worker_process,
                         args=[self.jobs_queue, self.moves_queue,
                               worker_cache_end, worker_sync_end,
                               depth, color])
        self.search_workers.append(worker)
        worker.start()
        # The child process now owns these pipe ends; drop our copies.
        worker_cache_end.close()
        worker_sync_end.close()

    # One cache worker serves all search workers over their pipes.
    self.cache_worker = Process(target=self.cache_worker_process,
                                args=[self.cache_pipes])
    self.cache_worker.start()
    for pipe_end in self.cache_pipes:
        pipe_end.close()
def __init__(self, search_depth=3, score_cls=None, timeout=20.):
    '''
    Game-playing agent that chooses a move using minimax search. You must
    finish and test this player to make sure it properly uses minimax to
    return a good move before the search time limit expires.

    Params
    ----------
    search_depth : int (optional)
        A strictly positive integer (i.e., 1, 2, 3,...) for the number of
        layers in the game tree to explore for fixed-depth search. (i.e., a
        depth of one (1) would only explore the immediate sucessors of the
        current state.)

    score_cls : object (optional)
        Heuristic instance whose ``get_score`` method evaluates game
        states.  Defaults to a fresh ``Heuristic()`` per agent.

    timeout : float (optional)
        Time remaining (in milliseconds) when search is aborted. Should be a
        positive value large enough to allow the function to return before
        the timer expires.
    '''
    # FIX: the original default `score_cls=Heuristic()` was evaluated once
    # at function-definition time, so every agent silently shared one
    # Heuristic instance.  A None sentinel builds a fresh one per agent.
    if score_cls is None:
        score_cls = Heuristic()
    self.search_depth = search_depth
    self.score = score_cls.get_score
    self.TIMER_THRESHOLD = timeout
def choose_next_node(self):
    """
    Score every node in open_list (h via the heuristic, f via
    ``get_f_score``) and pick the next node to expand.

    :return: the single best node, or a list of all nodes tied for the
             best f-score when there is a tie at the front.
    """
    for candidate in self.open_list:
        h_score = Heuristic(current_node=candidate,
                            final_node=self.final_node,
                            heuristic=self.heuristic).calculate()
        candidate.h = h_score
        candidate.f = self.get_f_score(h_score, candidate)

    # Ascending sort: the node with the LOWEST f-score ends up first.
    self.open_list.sort(key=lambda n: n.f)

    best = self.open_list[0]
    if len(self.open_list) > 1 and best.f == self.open_list[1].f:
        # Tie for the best f-score: hand back every tied node.
        return [n for n in self.open_list if n.f == best.f]
    self.solution_history.append(deepcopy(best))
    return best
def a_star(self):
    """Greedy best-first walk over the board until every cell is visited.

    Each round expands ``self.board`` with every move in ``moves``, scores
    the resulting boards with ``heuristicA``, follows the best-priority
    child, and records its ``last_movement``.  Prints the total number of
    moves at the end.
    """
    heuristic = Heuristic()
    moves_solution = []
    # The original bare `print` was a no-op expression in Python 3;
    # emit the intended blank line.
    print()
    # FIX: test for None BEFORE dereferencing attributes (the original
    # order would raise AttributeError on a None board), and use `is None`.
    while self.board is not None and self.board.num_not_visited != 0:
        queue = PriorityQueue()
        for move in moves:
            aux_board = move(copy.deepcopy(self.board))
            if aux_board is None:
                continue
            aux_board.priority = heuristic.heuristicA(aux_board)
            queue.put(aux_board)
        self.board = queue.get()
        # self.board.printBoard()
        moves_solution.append(self.board.last_movement)
        # queue.queue.clear()
    # FIX: `.format` must apply to the string, not to print's return value
    # (the original `print('...').format(...)` raised AttributeError).
    print('finish with a total of {} moves'.format(len(moves_solution)))
def main():
    """Read the task file named on the command line, run the scheduling
    heuristic, export the spreadsheet and print timing and objective
    (Portuguese messages preserved verbatim).
    """
    fileName = ''.join(sys.argv[1:])
    # FIX: use a context manager — the original `file = open(...)` both
    # shadowed the `file` builtin and never closed the handle.
    with open(fileName, "r") as inputFile:
        lines = inputFile.readlines()
    intervals, busyIntervals, tasks, meals = createDataStructure(lines)
    printData(intervals, busyIntervals, tasks, meals)
    startTime = time.time()
    heuristic = Heuristic(intervals, busyIntervals, tasks, meals)
    heuristic.heuristic()
    endTime = time.time()
    generateSpreasheet(heuristic.intervals)
    printData(heuristic.intervals, heuristic.busyIntervals,
              heuristic.tasks, heuristic.meals)
    print('\n\nTempo de Execução: ', endTime - startTime, 'segundos')
    print('\n\nFunção Objetivo: ', heuristic.objectiveFunction())
from game import Player, TwoPlayerGameState, TwoPlayerMatch
from heuristic import Heuristic
from javier_adrian_heuristic import (evaluation_function,
                                     MinimaxAlphaBetaStrategy)
from strategy import ManualStrategy
from reversi import Reversi

# Heuristic driving the automated player.
heuristic = Heuristic(name='Teo va en avion',
                      evaluation_function=evaluation_function)

# Human-controlled player.
player_1 = Player(name='player_1', strategy=ManualStrategy(verbose=0))

# Minimax (alpha-beta) player searching to depth 4.
player_2 = Player(
    name='player_2',
    strategy=MinimaxAlphaBetaStrategy(heuristic=heuristic,
                                      max_depth_minimax=4,
                                      verbose=0),
)

# player_a moves first.
player_a, player_b = player_1, player_2
initial_player = player_a
initial_board = None
def create_path(self, g, u, v, heuristic):
    """Run a heuristic-guided path search from u to v on graph g and cache it.

    :param g: graph to search.
    :param u: source vertex.
    :param v: target vertex.
    :param heuristic: heuristic function guiding the search.
    """
    # Heuristic performs the search on construction; the resulting object
    # is stored so callers can query it afterwards (e.g. `path_to`).
    self.path = Heuristic(g, u, v, heuristic)
def play_AStar(self):
    """Solve the Sokoban board with A* search (f = heuristic + path cost).

    Early exits return an error string paired with the elapsed seconds
    (missing storage/boxes/player, already solved, or no solution).  On
    success, returns a single string: the move count followed by the
    upper-cased move sequence — note this success path does NOT include
    the elapsed-time element (the seconds part is commented out below).
    Also updates ``self.branchingFactor`` and ``self.treeDepth``.
    """
    start = time()
    rootNode = deepcopy(self.board)
    generatedNodes, repeatedNodes = 1, 0
    # Sanity checks before searching.
    if not rootNode.get_stor_coordinates():
        end = time()
        return 'THERE ARE NO STORAGE LOCATIONS!', (end - start)
    if not rootNode.get_box_coordinates():
        end = time()
        return 'THERE ARE NO BOX LOCATIONS!', (end - start)
    if not rootNode.get_player_loc():
        end = time()
        return 'SOKOBAN PLAYER MISSING!', (end - start)
    if rootNode.is_goal_state():
        end = time()
        return 'BOARD IS ALREADY IN GOAL STATE!', (end - start)
    H = Heuristic()
    # H.set_heuristic("manhattan2")
    heuristicVal = H.calculate(rootNode.get_stor_coordinates(),
                               rootNode.get_box_coordinates())
    # Three parallel priority queues kept in lockstep: board states,
    # (player, boxes) signatures, and action sequences.
    frontier1 = PriorityQueue()
    frontier2 = PriorityQueue()
    path = PriorityQueue()
    frontier1.push(rootNode, heuristicVal)
    frontier2.push(
        (rootNode.get_player_loc(), rootNode.get_box_coordinates()),
        heuristicVal)
    # Sequences are seeded with a dummy '' entry; it is stripped with
    # [1:] when the answer is assembled.
    path.push([''], heuristicVal)
    visited = []
    deadlockConditions = 0
    # Hamza: This i represents the number of states visited, I think generated
    # Nodes does not apply because we don't explore possible moves for all of
    # the generated nodes so the branching factor cannot use this value
    # i, b = 0, 0  # don't really need i since we can just do len(visited)
    # b accumulates children counts to average the branching factor later.
    b = 0
    self.branchingFactor = 0
    self.treeDepth = 0
    while True:
        # print('Generated Nodes: {}, Repeated Nodes: {}, Frontier Length: {}, Deadlock Conditions: {}'.format(
        #     generatedNodes, repeatedNodes, len(frontier1.Heap), deadlockConditions))
        if not frontier1.Heap:
            end = time()
            return 'SOLUTION NOT FOUND', (end - start)
        currentNode = frontier1.pop()
        (currentPlayer, currentBoxCoordinates) = frontier2.pop()
        currentActionSequence = path.pop()
        possibleMoves = currentNode.possible_moves()
        visited.append((currentPlayer, currentBoxCoordinates))
        # Tree depth and branch factor variables
        b += len(possibleMoves)  # branching factor of the current node
        # i = len(visited)  # number of visited nodes
        self.treeDepth += 1
        for move in possibleMoves:
            childNode = deepcopy(currentNode)
            generatedNodes += 1
            childNode.update_board(move)
            # Skip states whose (player, boxes) signature was already seen.
            if (childNode.get_player_loc(),
                    childNode.get_box_coordinates()) not in visited:
                if childNode.is_goal_state():
                    childNode.make_board_grid()
                    # childNode.display_board()
                    end = time()
                    self.branchingFactor = ceil(
                        b / len(visited))  # average branching factor
                    return str(
                        len(currentActionSequence[1:] + [move])
                    ) + ' ' + ' '.join(
                        map(lambda x: x.upper(),
                            currentActionSequence[1:] + [move])).replace(
                                ',', '')  #, str((end - start)) + ' seconds'
                    # return None
                if self.is_deadlock(childNode):
                    # print('DEADLOCK CONDITION')
                    deadlockConditions += 1
                    continue
                heuristicVal = H.calculate(
                    childNode.get_stor_coordinates(),
                    childNode.get_box_coordinates())
                cost = self.compute_cost(currentActionSequence + [move])
                # childNode.make_board_grid()
                # childNode.display_board()
                frontier1.push(childNode, heuristicVal + cost)
                frontier2.push((childNode.get_player_loc(),
                                childNode.get_box_coordinates()),
                               heuristicVal + cost)
                path.push(currentActionSequence + [move],
                          heuristicVal + cost)
            else:
                repeatedNodes += 1
def run(self, student_strategies: dict, increasing_depth: bool = True,
        n_pairs: int = 1, allow_selfmatch: bool = False) -> Tuple[dict, dict, dict]:
    """Run a round-robin tournament between student strategies.

    Each unordered pair of students (self-matches only when
    ``allow_selfmatch``) plays ``2 * n_pairs`` games, alternating who moves
    first.  With ``increasing_depth`` the pair plays once per minimax depth
    in ``range(1, self.__max_depth)``; otherwise a single match at
    ``self.__max_depth``.

    :param student_strategies: mapping of student name -> strategy classes.
    :param increasing_depth: play one match per depth instead of one match.
    :param n_pairs: number of first-player alternating pairs per matchup.
    :param allow_selfmatch: allow a student to play themselves.
    :return: (scores, totals, name_mapping).
    """
    scores = dict()
    totals = dict()
    name_mapping = dict()

    def _make_player(name, strategy, depth):
        # One place to build a minimax player — the original repeated this
        # construction four times verbatim.
        return Player(
            name=name,
            strategy=MinimaxAlphaBetaStrategy(
                heuristic=Heuristic(
                    name=strategy.get_name(),
                    evaluation_function=strategy.evaluation_function),
                max_depth_minimax=depth,
                verbose=0,
            ),
        )

    for student1 in student_strategies:
        strats1 = student_strategies[student1]
        for student2 in student_strategies:
            # Each unordered pair is visited once (student1 <= student2).
            if student1 > student2:
                continue
            if student1 == student2 and not allow_selfmatch:
                continue
            strats2 = student_strategies[student2]
            for player1 in strats1:
                for player2 in strats2:
                    # we now instantiate the players
                    for pair in range(2 * n_pairs):
                        player1_first = (pair % 2) == 1
                        sh1 = player1()
                        name1 = student1 + "_" + sh1.get_name()
                        name_mapping[name1] = sh1.get_name()
                        sh2 = player2()
                        name2 = student2 + "_" + sh2.get_name()
                        name_mapping[name2] = sh2.get_name()
                        if increasing_depth:
                            depths = range(1, self.__max_depth)
                        else:
                            depths = [self.__max_depth]
                        for depth in depths:
                            pl1 = _make_player(name1, sh1, depth)
                            pl2 = _make_player(name2, sh2, depth)
                            self.__single_run(player1_first, pl1, name1,
                                              pl2, name2, scores, totals)
    return scores, totals, name_mapping
for i in range(size): for j in range(size): pygame.draw.line(window, (255, 0, 0), (i * 800 // size, 0), (i * 800 // size, 600)) pygame.draw.line(window, (255, 0, 0), (0, j * 600 // size), (800, j * 600 // size)) img = font.render(str(board[j][i]) + "", True, (200, 200, 200)) window.blit(img, (i * 800 // size, j * 600 // size)) pygame.init() window = pygame.display.set_mode((800, 600)) pygame.display.set_caption("Max Profit Path Finder") player = Player() h = Heuristic(5) h.calculate_max_profit_path() path_stack = h.recover_path() run = True show = False player_turn = True printWinner = False while run: for event in pygame.event.get(): pos = pygame.mouse.get_pos() if event.type == pygame.QUIT: run = False if event.type == pygame.MOUSEBUTTONDOWN and player_turn: current_location = player.current_square(pos, h.board_size) if player.is_legal(current_location): player.move(current_location, h.board)
14: (-1, 0), 15: (-1, 1), 16: (-1, 2), 17: (-1, 3), 18: (-1, 4), 19: (-1, 5) } # Heurística manhattan def h(points): return lambda u, v: abs(points[v][0] - points[u][0]) + abs(points[v][1] - points[u][1]) path = Heuristic(g, 0, 13, h(points)) print("Camino con búsqueda por heurísticas: ") print(path.path_to(13)) path2 = A_Star(g, 0, 13, h(points)) print("Camino con A*: ") print(path2.path_to(13)) print("----.----") """ Otro ejemplo: Grafo Triangular con heurística Manhattan. """ g = Graph(4) g.add_edge(0, 1, 1) g.add_edge(1, 3, 15) g.add_edge(0, 2, 11) g.add_edge(2, 3, 9)
def generateAndRunGame(configFile, matrixFile):
    """Parse config and matrix files, build the board, run the configured
    search algorithm with start/finish banners, and print the elapsed time.

    :param configFile: path/handle of the configuration to parse.
    :param matrixFile: path/handle of the board matrix to parse.
    """
    # Parse config
    config = generateConfigDetails(configFile)

    # Generate matrix
    matrix, boxes, targets, player = generateMatrixAndPositions(matrixFile)

    # Generate board and heuristic
    board = Board(matrix, boxes, targets, player)
    heuristic = Heuristic(config.heuristic)

    def _run_with_banners(label, solve):
        # One place for the banner framing — the original repeated these
        # six print blocks for every algorithm.  Strings are byte-identical
        # to the originals.
        print("============================")
        print("\n[Starting " + label + " Algorithm]\n")
        print("============================\n")
        solve()
        print("\n============================")
        print("\n[Finished " + label + " Algorithm]\n")
        print("============================")

    # Start timer
    start = time.time()

    # Run the selected algorithm
    if config.algorithm == SearchMethods.BFS:
        _run_with_banners("BFS", lambda: bfs.solve(board))
    elif config.algorithm == SearchMethods.DFS:
        _run_with_banners("DFS", lambda: dfs.solve(board))
    elif config.algorithm == SearchMethods.IDDFS:
        _run_with_banners("IDDFS", lambda: iddfs.solve(board, config.maxDepth))
    elif config.algorithm == SearchMethods.GREEDY:
        _run_with_banners("GREEDY", lambda: greedy.solve(board, heuristic))
    elif config.algorithm == SearchMethods.A_STAR:
        _run_with_banners("A*", lambda: aStar.solve(board, heuristic))
    elif config.algorithm == SearchMethods.IDA_STAR:
        _run_with_banners("IDA*", lambda: idaStar.solve(board, heuristic))

    end = time.time()
    print("\nResolution time: ", end - start)
xI[0] = verticesArr[shortcutTour[i]][0] xI[1] = verticesArr[shortcutTour[i + 1]][0] yI[0] = verticesArr[shortcutTour[i]][1] yI[1] = verticesArr[shortcutTour[i + 1]][1] tourCost += getWeights(verticesArr[shortcutTour[i]], verticesArr[shortcutTour[i + 1]]) plt.plot(xI, yI, 'yo-', alpha=0.5, lw=4) txt = str(shortcutTour[i]) plt.annotate(txt, (xI[0], yI[0])) plt.show() tourCost = int(tourCost) print("Tour Cost after shortcut= ", tourCost) heur = Heuristic(shortcutTour, vertices) heur.iterateAndFix() heur.iterateAndFix() tour = heur.tour print(len(tour)) print(tour) plt.figure(3) tourCost = 0 for i in range(0, len(tour) - 1): xI[0] = verticesArr[tour[i]][0] xI[1] = verticesArr[tour[i + 1]][0] yI[0] = verticesArr[tour[i]][1] yI[1] = verticesArr[tour[i + 1]][1]
# Amount of iterations N = 10 # Maximum time of a traject MIN_180 = 180 MIN_120 = 120 # Improve algorithm on/off IMPROVE = True DEPTH = 4 # Name of city to exclude from schedule EXCLUSION = "" # Starting in random city, choosing random connections A = Heuristic("random", Heuristic.random_city, Heuristic.general_connections) # Starting in city with most connections, choosing random connections B = Heuristic("centered", Heuristic.centered_city, Heuristic.general_connections) # Starting in city with least connections, choosing random connections C = Heuristic("outer", Heuristic.outer_city, Heuristic.general_connections) # Starting in random city, choosing connections based on probability distribution D = Heuristic("overlay", Heuristic.random_city, Heuristic.overlay_connections) # Starting in random city, continuing to city with least connections E = Heuristic("lookahead", Heuristic.random_city, Heuristic.least_connections) run(CONNECTIONS_FILE, COORDINATES_FILE, BEST_SCHEDULE_FILE, N, MIN_180,
def simulate(self):
    """Simulated annealing for the N-queens problem.

    From ``self.startState``, samples neighbors and accepts them when they
    improve the attack count or pass the Metropolis test
    ``exp(-deltaF / t) > random.random()``.  The temperature ``t`` decays
    by ``self.alpha`` each outer iteration; zero-attack configurations are
    collected in ``solutions``.

    Side effects: updates ``self.neighbor``, writes the final board via
    the project-level ``file`` module and prints a summary (Portuguese
    messages preserved verbatim).

    :return: number of attacking queen pairs in the final state.
    """
    # NOTE: converted from Python 2 `print` statements to `print()` calls;
    # comma-separated arguments produce the same space-separated output.
    i = 0
    success = sys.maxsize
    currentState = self.startState
    t = self.startTemp
    solutions = []
    while not (success == 0) and i < self.iterate:
        j = 0
        success = 0
        while success <= self.maxSuc and j < self.maxDis:
            f1 = Heuristic(currentState).attacks()
            newState = self.neighbor.generateState()
            # Record every distinct perfect (zero-attack) solution seen.
            if Heuristic(newState).attacks() == 0:
                if not Heuristic(newState).queensPosition() in solutions:
                    solutions.append(Heuristic(newState).queensPosition())
            if self.state_update:
                print(Heuristic(currentState).queensPosition(), " -> ",
                      Heuristic(newState).queensPosition())
            f2 = Heuristic(newState).attacks()
            deltaF = f2 - f1
            # Guard against division by zero once fully cooled.
            if not t == 0.0:
                # Accept improvements, or worse states with Boltzmann
                # probability (Metropolis criterion).
                if (deltaF <= 0) or (exp(-deltaF / t) > random.random()):
                    currentState = newState
                    success += 1
            j += 1
        self.neighbor = Neighbor(currentState)
        # Geometric cooling schedule.
        t = self.alpha * t
        i += 1
    file.write(Heuristic(currentState).queensPosition(),
               self.neighbor.createBoard(),
               url='./resource/newBoard.txt')
    print("Contagem final de sucessos : ", success)
    print("Temperatura final : ", t)
    print("Numero de iteracoes : ", i)
    print("Posicao Inicial das ", len(self.startState), " rainhas : ",
          Heuristic(self.startState).queensPosition())
    print("Posicao Final das ", len(self.startState), " rainhas : ",
          Heuristic(currentState).queensPosition())
    print("\tNumero de rainhas atacando : ",
          Heuristic(currentState).attacks())
    print("Solucoes encontradas: ")
    for solution in solutions:
        print(solution)
    return Heuristic(currentState).attacks()
def __init__(self, state):
    """Store a queen configuration and derive the queens' positions.

    :param state: queen configuration for the board.
    """
    self.state = state
    # Board representation; starts empty and is filled in elsewhere.
    self.baseBoard = []
    # Queen positions derived from the state via the Heuristic helper.
    self.qP = Heuristic(state).queensPosition()