def computer(board, o_s, turn_no):
    if turn_no == 1:
        # If the opponent opened in a corner, take the centre.
        if o_s[-1] in [(0, 0), (2, 2), (2, 0), (0, 2)]:
            board[1][1] = 'X'
        else:
            i, j = minimax(board, 0, True, 8)[1:]
            board[i][j] = 'X'
    elif turn_no == 3:
        # Guard against the double-corner trap along either diagonal.
        s1 = ''.join(board[i][i] for i in range(3))
        s2 = ''.join(board[i][2 - i] for i in range(3))
        if 'OXO' in (s1, s2):
            board[2][1] = 'X'
        else:
            ch = checkConsecutive(board)
            if ch != -1:
                i, j = ch
            else:
                i, j = minimax(board, 0, True, 9 - turn_no - 1)[1:]
            board[i][j] = 'X'
        return board
    else:
        try:
            ch = checkConsecutive(board)
            if ch != -1:
                i, j = ch
            else:
                i, j = minimax(board, 0, True, 9 - turn_no - 1)[1:]
            board[i][j] = 'X'
            return board
        except Exception:  # no move left to make
            print("It's a Draw")
            return board
    return board
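# A minimal sketch of the (score, row, col) minimax convention the call sites
# above rely on (hypothetical; the project's real minimax and checkConsecutive
# live elsewhere). The [1:] slice works because the function returns a
# (score, i, j) triple. Empty squares are assumed to be ' '.
def _winner(board):
    lines = [[board[i][j] for j in range(3)] for i in range(3)]          # rows
    lines += [[board[i][j] for i in range(3)] for j in range(3)]         # columns
    lines += [[board[i][i] for i in range(3)],
              [board[i][2 - i] for i in range(3)]]                       # diagonals
    for line in lines:
        if line[0] != ' ' and line.count(line[0]) == 3:
            return line[0]
    return None

def minimax(board, depth, is_max, moves_left):
    w = _winner(board)
    if w == 'X':
        return 10 - depth, -1, -1
    if w == 'O':
        return depth - 10, -1, -1
    if moves_left == 0:
        return 0, -1, -1
    best = (-99, -1, -1) if is_max else (99, -1, -1)
    for i in range(3):
        for j in range(3):
            if board[i][j] == ' ':
                board[i][j] = 'X' if is_max else 'O'      # try the move
                score = minimax(board, depth + 1, not is_max, moves_left - 1)[0]
                board[i][j] = ' '                         # undo it
                if (is_max and score > best[0]) or (not is_max and score < best[0]):
                    best = (score, i, j)
    return best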
def run(self, agent):
    '''
    This function specifies the framework for the play - changing turns
    and continuing the game with the provided search.
    '''
    self.noOfLegalActionsPerPlay = []
    self.noOfFaceUpCardsPerPlay = []
    sequence_of_actions = []  # initializing the sequence of actions
    opponent = self.get_enemy(agent)  # getting the opponent
    print("*********************************\nInitial Table: " + str(self.table))  # the dominoes on the initial table
    print(str(agent.role) + " is playing first")
    print(str(agent.role) + "'s hand: " + str(agent.hand))  # the dominoes initially in the current player's hand
    print("Dominoes " + str(agent.role) + " collected: " + str(agent.collected))  # the dominoes collected by the current player
    print(str(opponent.role) + "'s hand: " + str(opponent.hand))  # the dominoes in the opponent's hand
    print("Dominoes " + str(opponent.role) + " collected: " + str(opponent.collected))  # the dominoes collected by the opponent
    print("*********************************\n\n")
    while not self.isGameOver():
        best_action = minimax.minimax(self, agent)  # the best action according to the search algorithm
        print("main_action1: " + str(best_action))
        if len(best_action) > 1:
            self.picked_last = agent.role  # picked_last = the last player who performed an action
            self.makeMove(best_action[0], best_action[1:], agent)  # the agent performs the best option
            self.noOfFaceUpCardsPerPlay.append(len(self.table))
            self.print_board(agent)  # prints the board after the move
            sequence_of_actions.append(best_action)
        # if the game is not over => switch to the other player
        if not self.isGameOver():
            best_action = minimax.minimax(self, opponent)  # the best action according to the search algorithm
            print("main_action2: " + str(best_action))
            if len(best_action) > 1:
                self.picked_last = opponent.role  # fixed: was agent.role, but the opponent is moving here
                self.makeMove(best_action[0], best_action[1:], opponent)
                self.noOfFaceUpCardsPerPlay.append(len(self.table))
                self.print_board(opponent)
                sequence_of_actions.append(best_action)
        else:
            break
    # print("Number of times legal actions generation was executed=%d" % globals.countLegalActionsGenerationExecution)
    print("Score of the Max at the end: " + str(self.computeScore(agent)))
    print("sequence of actions played: " + str(sequence_of_actions))
    return sequence_of_actions
def test_time():
    state = State()
    start_time = time.time()
    minimax_full(state)
    execution_time = round(time.time() - start_time, 2)
    print(f'Full minimax: {execution_time}s')
    for i in range(1, 10):
        start_time = time.time()
        minimax(state, i)
        execution_time = round(time.time() - start_time, 2)
        print(f'Minimax depth = {i}: {execution_time}s')
def play(self, gameState, timeLimit):
    # Use minimax to return the next state with the highest score.
    nextStates = gameState.findNextStates()
    best_score = minimax(nextStates[0], self.depth, True)
    best_index = 0
    for i, state in enumerate(nextStates[1:], start=1):
        score = minimax(state, self.depth, True)  # evaluate each state once, not three times
        if score > best_score:
            best_score = score
            best_index = i
    return nextStates[best_index]
def defendStrategy(self, Board, colour, oppo_colour, type, no_turns):
    FIRST_SHRINK = 128
    SECOND_SHRINK = 192
    # weights for the evaluation function, and the search depth
    evalWeights = [3, 2, 0, 1, 1, 3]
    depth = 3
    # zero out the distance-to-line term, since we are past the placing phase
    if type == "MP":
        evalWeights[0] = 0
    # increase the weight of the distance-from-centre term as the board
    # approaches each shrink
    if 96 <= no_turns <= FIRST_SHRINK:
        evalWeights[3] += 0.6 * (no_turns - 96)
    if 152 <= no_turns <= SECOND_SHRINK:
        evalWeights[3] += 0.6 * (no_turns - 152)
    # search deeper after the final shrink
    if no_turns > SECOND_SHRINK:
        depth = 4
    # switch this evaluation term on only in these ranges, to stay within
    # the time constraint
    if no_turns > 100 or no_turns < 20:
        evalWeights[2] = 2
    return minimax.minimax(Board, colour, oppo_colour, type, evalWeights, depth)
def play_ai():
    print("AI IS PLAYING")
    global board, turn
    best_score = -inf
    pos = None
    pos_to_play = None
    for i in range(8):
        for j in range(8):
            b = board[i][j]
            if b != "" and b[0] != "w":  # only consider the AI's (non-white) pieces
                score = minimax(board, i, j, 2, True)
                if score[1] > best_score:
                    best_score = score[1]  # fixed: best_score was never updated
                    pos = [i, j]
                    pos_to_play = [score[0][0], score[0][1]]
    if pos:
        print(pos, pos_to_play)
        mv = board[pos[0]][pos[1]]
        print(mv)
        if rules(mv, pos[0], pos[1], pos_to_play[0], pos_to_play[1], board):
            board[pos[0]][pos[1]] = ''
            board[pos_to_play[0]][pos_to_play[1]] = mv
            initialize()
            turn = "b" if turn == "w" else "w"
def main():
    # 6 x 7 Connect Four board, stored as a flat list
    board = [
        0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0,
    ]
    display(board)
    while not mm.board_filled(board) and mm.over(board) == 0:
        player_move = int(input("Move: ")) - 1
        mm.move(player_move, 1, board)  # fix
        display(board)
        if mm.board_filled(board) or mm.over(board) != 0:
            break
        print("Thinking...")
        next_move = mm.minimax(-1, 1, board)
        sleep(2)
        mm.move(next_move % 7, -1, board)  # fix
        display(board)
        print("Your Turn:")
def ai_decision_making():
    ai_temp_hand = []
    for i in ai_hand:  # collect the playable tiles
        if i.topval == tiles_on_field[0].openval:
            ai_temp_hand.append(i)
        if i.bottomval == tiles_on_field[0].openval:
            ai_temp_hand.append(i)
        if i.topval == tiles_on_field[-1].openval:
            ai_temp_hand.append(i)
        if i.bottomval == tiles_on_field[-1].openval:
            ai_temp_hand.append(i)
    if len(ai_temp_hand) == 0:  # if none are playable, draw a random tile
        if len(set_of_tiles) > 0:
            get_random_tile(0)
            ai_decision_making()
        else:
            print("AI cannot get tile")
        return
    utility = -inf  # utility value
    utile = None    # utility tile
    temp_field = tiles_on_field[:]
    for t in ai_temp_hand:  # use minimax to pick the best tile
        maxi = minimax.minimax(temp_field, ai_hand, 'a', field, set_of_tiles, t)
        if maxi > utility:
            utility = maxi
            utile = t
    player_place_tile(utile, ai_hand, 0)  # place the chosen tile
    return
def findBestMove():
    s.found_pattern = False
    bestScore = -math.inf
    bestMove = s.Vec2(0, 0)
    for y in range(s.board.size):
        for x in range(s.board.size):
            if s.board.state[y][x].val == '0':
                # try the move, score it, then undo it
                s.board.state[y][x].val = '1'
                score = minimax(0, -math.inf, math.inf, False)
                s.board.state[y][x].val = '0'
                if score > bestScore:
                    bestScore = score
                    bestMove.x = x
                    bestMove.y = y
    if bestScore == 0 and not s.found_pattern:
        bestMove = s.board.randomCoord()
    return bestMove
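# A minimal, self-contained illustration of the alpha-beta pruning pattern the
# (depth, alpha, beta, maximizing) call above relies on (a sketch, not the
# project's actual minimax, which operates on the shared board in module s).
# Here the game tree is a nested list: leaves are scores, inner lists are
# choice nodes.
import math

def alphabeta(node, depth, alpha, beta, maximizing):
    if depth == 0 or not isinstance(node, list):
        return node  # leaf: the heuristic score
    if maximizing:
        best = -math.inf
        for child in node:
            best = max(best, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, best)
            if beta <= alpha:  # the minimizing player already has a better option
                break
        return best
    best = math.inf
    for child in node:
        best = min(best, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best

# usage: this classic textbook tree evaluates to 6
assert alphabeta([[3, 5], [6, 9], [1, 2]], 2, -math.inf, math.inf, True) == 6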
def minimax_move(self, depth=1, eval_func='pos_score'):
    # return the move with the max minimax score
    # minimax(board, depth, player, alpha, beta) -> int
    move_eval_dict = {}
    possible_moves = self.find_all_valid_moves()
    if possible_moves:
        for move in possible_moves:
            game_copy = deepcopy(self)
            game_copy.take_move(move[0], move[1])
            move_eval_dict[move] = mm.minimax(
                game_copy.board,
                depth=depth,
                player=opposite(self.current_player),
                eval_func=eval_func)
        # shuffle the dict, or we would always choose the same move on ties
        move_eval_dict = mm.shuffle_dict(move_eval_dict)
        if self.current_player == BLACK:
            # Black maximizes: return the move with the max minimax score.
            return max(move_eval_dict, key=move_eval_dict.get)
        else:
            # White is the minimizing player: the lower the better.
            return min(move_eval_dict, key=move_eval_dict.get)
        # When I first adapted this from Sebastian's YouTube code, I forgot the
        # two lines above, and Black won 95% of games even when White used
        # minimax and Black played randomly.
    else:
        return None
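# A plausible sketch of the mm.shuffle_dict helper used above (hypothetical;
# the real one may differ): rebuild the dict in a random key order so that
# max()/min() break ties between equally scored moves randomly instead of
# always returning the first insertion.
import random

def shuffle_dict(d):
    items = list(d.items())
    random.shuffle(items)
    return dict(items)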
def test_bestposiblemove_number(supply_board_bestpossiblemove2):
    import metainfo
    metaObj = metainfo.metainfo(4)
    import minimax
    obj = minimax.minimax(metaObj)
    board = supply_board_bestpossiblemove2
    pos = obj.findBestMove(board, '2', '1')
    assert pos == (2, 0), "Computer's Best Possible Move Test Case Failed"
def test_win_detection(self):
    z = [0, 0, 1, 1, 2, 2]  # black wins by playing in column 3
    for i in z:
        self.board = self.board.move(i)
    aiPlay = minimax.minimax(self.board,
                             maximizing=True,
                             original_player=connectfour.C4Piece.B,
                             max_depth=4)
    self.assertEqual(aiPlay, 3)
def auto_alg():
    init_game()
    game_over = False
    timer_array = []
    times_2048 = 0
    while not game_over:
        start_timer = timer()
        temp_board = Game.board
        # count the empty cells to pick a search depth: the emptier the
        # board, the shallower the search
        empty_cells = [0 for x in range(4) for y in range(4) if temp_board[x][y] == 0]
        test_depth = len(empty_cells)
        depth_map = [9, 9, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4]
        depth = depth_map[test_depth]
        best_move = minimax.minimax(temp_board, depth, "PLAYER", -np.inf, np.inf)[0]
        Game.board = game_logic.main_loop(temp_board, best_move)[1]
        print("New board with move: " + str(best_move))
        print(Game.board)
        # the game is over when no direction produces a legal move
        false_counter = 0
        for i in range(4):
            if not game_logic.main_loop(temp_board, i)[0]:
                false_counter += 1
        if false_counter == 4:
            print("NOOOOO GAME OVAAAHHR")
            game_over = True
        end_timer = timer()
        print("TIME ELAPSED THIS MOVE:")
        print(end_timer - start_timer)
        print("-")
        timer_array.append(end_timer - start_timer)
        print("TOTAL TIME ELAPSED:")
        print(sum(timer_array))
        print("AVERAGE TIME EACH MOVE:")
        print(np.mean(timer_array))
    for x in range(4):
        for y in range(4):
            if Game.board[x][y] >= 2048:
                times_2048 += 1
    return np.mean(timer_array), sum(timer_array), times_2048
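# The adaptive-depth idea above, in isolation (a sketch under the same
# assumption that the board is a 4x4 grid with 0 marking an empty cell):
# fewer empty cells means a narrower game tree, so the search can afford
# to look deeper.
def choose_depth(board):
    empty = sum(1 for row in board for v in row if v == 0)
    depth_map = [9, 9, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4]
    # clamp the index: a fresh board has 16 empty cells, one past the table
    return depth_map[min(empty, len(depth_map) - 1)]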
def init_game_minimax(grid=1):
    grid = gr.create_grid(grid)
    start_location = gr.get_start_location()
    game = Game(grid, start_location)
    score, moves = minimax(game)
    if score < game.max_steps():
        print("Score:", score)
    else:
        print("Score: infinity")
    return score, moves
def on_get(self, req, resp):
    boardString = req.params['board']
    player = int(req.params['player'])
    board = list(map(int, boardString.split(', ')))
    move = minimax(board, player)
    resp.status = falcon.HTTP_200  # this is the default status
    resp.body = json.dumps(move)
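# A hypothetical client call against the endpoint above (the host, port, and
# route are assumptions; only the query-parameter format comes from the
# handler): the board travels as a comma-plus-space separated string of ints.
import requests

resp = requests.get('http://localhost:8000/minimax',
                    params={'board': '0, 0, 0, 0, 0, 0, 0, 0, 0', 'player': 1})
print(resp.json())  # the move chosen by minimax(board, player)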
def player_machine_mini():
    move = minimax(board)
    print move
    board[move[0]][move[1]] = 'M'
    printBoard(board)
    green_circle(150 + move[1] * 100, 400 - move[0] * 100)
    if isGameOver(board)[0]:
        print "Machine wins"
        t.onscreenclick(None)
    player = 'H'
def test(root, solution, maximize):
    '''
    Check that minimax applied to root returns the expected solution.
    '''
    result = minimax(root, 10, maximize=maximize)
    if result != solution:
        print(str.format("Error during the test (maximize = {:b}) on the tree:", maximize))
        print(root)
        print('minimax returned: ', result)
        print('but the solution was: ', solution)
        raise Exception()
def get_ready_state(data):
    board = data['board']
    move = minimax(board, checkOpens(board), 20, True,
                   data['player_turn_id'], -1000, 1000,
                   ['efficiency_heuristic'])[1]
    sio.emit(
        'play', {
            'tournament_id': 5000,
            'player_turn_id': data['player_turn_id'],
            'game_id': data['game_id'],
            'movement': move
        })
def get_agent_piece_and_move(self):
    '''
    Call minimax from minimax.py. This returns the agent piece that will
    move along with the coordinate it will move to.
    '''
    start = time.time()
    _, _, piece, pos = minimax(self, depth, float('-inf'), float('inf'), True)
    print('time passed:', time.time() - start)
    return piece, pos
def massacre(self, Board, colour, oppo_colour, type, no_turns):
    SECOND_SHRINK = 192
    # weights for the evaluation function, and the search depth
    evalWeights = [0, 2, 0, 1, 0, 1]
    depth = 3
    # search deeper after the final shrink
    if no_turns > SECOND_SHRINK:
        depth = 4
    return minimax.minimax(Board, colour, oppo_colour, type, evalWeights, depth)
def test_win_prevention(self):
    self.board = self.board.move(3)
    self.board = self.board.move(3)
    self.board = self.board.move(4)
    # In this position, black needs to occupy 2 or 5; otherwise red will
    # occupy one of them and win next move. This is actually harder than
    # preventing a direct win.
    recommendation = minimax(self.board,
                             maximizing=True,
                             original_player=C4Piece.B,
                             max_depth=4)
    self.assertIn(recommendation, [2, 5])
def minimaxStep(self, graph):
    if len(self.strategy) < 2:
        self.strategy = minimax(graph, self.currentPosition, self.otherPos,
                                self.limit, self.type)
        if self.strategy is None:
            print("No Strategy!")
            return
        print("New Strategy: ", self.strategy)
    if len(self.strategy) < 2:
        return ""
    self.strategy = self.strategy[1:]
    return self.strategy[0]
def test_win_prevention(self):
    """
    Test that the AI plays the right move to prevent the other player
    from winning.
    """
    z = [0, 0, 1, 1, 2]  # the AI must play 3 to stop the other player from winning
    for i in z:
        self.board = self.board.move(i)
    aiPlay = minimax.minimax(self.board,
                             maximizing=True,
                             original_player=connectfour.C4Piece.B,
                             max_depth=4)
    self.assertEqual(aiPlay, 3)
def opponent_move(self):
    """
    Computer movement - minimax algorithm. Changes the state of the object.
    """
    print("\nOpponent's turn.")
    best_payment = minimax(self.state, self.search_depth,
                           self.successor_fun, self.heuristics)
    next_states = self.state['next']
    out_states = list()
    for next_state in next_states:
        if next_state['payment'] == best_payment:
            out_states.append(next_state)
    new_state = rd.choice(out_states)
    self.state = new_state
def test_win_detection(self):
    self.board = self.board.move(0)
    self.board = self.board.move(1)
    self.board = self.board.move(0)
    self.board = self.board.move(1)
    self.board = self.board.move(0)
    self.board = self.board.move(1)
    # Black will win if played in 0
    recommendation = minimax(self.board,
                             maximizing=True,
                             original_player=C4Piece.B,
                             max_depth=4)
    self.assertEqual(recommendation, 0)
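# A sketch of the search these Connect Four tests exercise (an assumption, not
# the project's verbatim code): minimax(board, maximizing, original_player,
# max_depth) returns the best *column* for original_player. The Board API used
# here (legal_moves, move, is_win, is_draw, evaluate) extends what the tests
# themselves call, and the tests always pass maximizing=True at the root.
def minimax(board, maximizing, original_player, max_depth):
    def score(b, maximizing, depth):
        if b.is_win or b.is_draw or depth == 0:
            return b.evaluate(original_player)  # assumed heuristic on Board
        if maximizing:
            return max(score(b.move(m), False, depth - 1) for m in b.legal_moves)
        return min(score(b.move(m), True, depth - 1) for m in b.legal_moves)

    best_score, best_move = float('-inf'), -1
    for m in board.legal_moves:
        result = score(board.move(m), not maximizing, max_depth - 1)
        if result > best_score:
            best_score, best_move = result, m
    return best_move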
def computer_player_turn(board, computer_player):
    print("It is the computer's turn. Computer is making a move.")
    print()
    board.pretty_print()
    (move, value, nodes_evaluated, time_taken) = minimax(board, computer_player, 7)
    board.place_marker(computer_player, move)
    print(str(nodes_evaluated) + " game states evaluated in " +
          str(time_taken) + " seconds.")
    print("Computer selected column " + str(move + 1) + " for its marker.")
    print()
def ai_play(board: Board, search_depth: int = SEARCH_DEPTH):
    best_move = None
    best_move_score = None
    for i in board.available_squares:
        board_clone = copy.deepcopy(board)
        board_clone.place_mark(i)
        score = minimax(board_clone, search_depth)
        if best_move is None:
            best_move = i
            best_move_score = score
        else:
            is_better = (best_move_score < score if board.playing == Mark.NOUGHT
                         else best_move_score > score)
            if is_better:
                best_move = i
                best_move_score = score
    board.place_mark(best_move)
def __main__():
    max_player = True
    s = State(max_player)
    while True:
        print("\n\nyour move!")
        s = s.make_move()
        s.show_board()
        if game_over(s):
            break
        print("\n\nopponent's move!")
        U = s.get_successors()
        index = minimax(s, 4, not max_player, 4)
        s = State(not U[index].max_player, copy.deepcopy(U[index].board))
        s.show_board()
        if game_over(s):
            break
def getScores(board, depth):
    # prange comes from numba and needs an @njit(parallel=True) caller;
    # under plain Python, range() behaves the same
    scores = []
    for value in prange(BOARD_SIZE**2):
        x, y = divmod(value, BOARD_SIZE)  # was hard-coded to 5; use BOARD_SIZE
        if board[x][y] == 0:
            copiedArray = np.copy(board)
            copiedArray[x][y] = 1
            score = minimax(copiedArray, depth, -999999, 999999, False)
            scores.append([x, y, score])
            print(x, y, score)
    return scores
def ready(data):
    gameID = data['game_id']
    playerTurnID = data['player_turn_id']
    board = data['board']
    movementCoordinates = minimax(board, True)
    sio.emit(
        'play', {
            'tournament_id': tournamentID,
            'player_turn_id': playerTurnID,
            'game_id': gameID,
            'movement': movementCoordinates
        })
def computerGame():
    # --Make human move(s) and computer reply/replies.
    copyOldBoardToScreenInMiniturizedForm(c, r)
    move = minimax.minimax(COMPUTER, M, PW, PB, MAXDEPTH)
    print(move)
    makeMove(move[1], move[0], move[2], COMPUTER)
    canvas.create_rectangle(655, 330, 870, 370, width=0, fill='grey30')
    if legalMove(HUMAN) and not legalMove(COMPUTER):
        return
    # --Make computer reply/replies (1 = BLACK = human, -1 = computer = WHITE)
    if legalMove(COMPUTER):
        makeComputerReply()  # <--This is the computer's strategy (IMPORTANT!).
        while legalMove(COMPUTER) and not legalMove(HUMAN):
            makeComputerReply()
    # displayAllLegalMovesForHumanPlayer('BLACK')
    if not legalMove(HUMAN) and not legalMove(COMPUTER):
        quit()
    # --Note: a legal move for the human must now necessarily exist.
    return
def computer_play(self):
    minimax.PLAYER = self.game.turn.color
    print '------------------------------'
    print 'executing minimax with:'
    print self.game.board
    print self.game.turn.color
    print '------------------------------'
    position = minimax.minimax(self.game.board, self.difficulty,
                               self.game.turn.color, self.heuristic)[1]
    print 'minimax finished with choice: %s' % str(position)
    self.game.play(position)
    message = "%s's turn." % (self.game.turn.color)
    self.update_status(message)
    self.update_board()
    self.update_score()
    self.update_pass_turn()
    self.check_next_turn()
def gameover(self, game, last_move):
    pass


if __name__ == "__main__":
    import othello
    import minimax

    # Experiment 1:
    # Player 1 and Player 2 are evenly matched with 3-ply deep search.
    # Player 2 wins with a final score of 28.
    # Player 1: 0.2 s per ply, Player 2: 0.4 s per ply.
    play(othello.game(),
         player_epsilon(lambda x: minimax.minimax(x, 3)),
         player_epsilon(lambda x: minimax.minimax(x, 3)),
         False)

    # Experiment 2:
    # Now we show the significance of an evaluation function. We weaken
    # player 1 to a 2-ply deep search but use the edge evaluation function;
    # player 1 now beats player 2 with a score of 58!
    # Player 1: 0.1 s per ply, Player 2: 0.4 s per ply.
    # play(othello.game(), player(lambda x: minimax.minimax(x, 2, othello.edge_eval)), player(lambda x: minimax.minimax(x, 3)), False)

    # Experiment 1 (with alpha-beta):
    # Player 1: 0.1 s per ply, Player 2: 0.1 s per ply.
    # play(othello.game(), player(lambda x: minimax.alphabeta(x, 3)), player(lambda x: minimax.alphabeta(x, 3)), False)

    # Experiment 2 (with alpha-beta):
    # Player 1: 0.0 s per ply, Player 2: 0.1 s per ply.
# (fragment: tail of the mouse-click handling for the bottom row of cells,
# followed by the AI's reply)
            circle = not circle
            win[1][2] = first
            print("H")
        elif (3 * width) / 3 > mouse[0] > (2 * width) / 3 and \
                (3 * height) / 3 > mouse[1] > (2 * height) / 3 and win[2][2] == 0:
            pygame.draw.circle(tic, blue, i, rad, 7)
            circle = not circle
            win[2][2] = first
            print("I")
        if minimax.isGameOver(win) or minimax.isTie(win):
            print("GAME OVER")
            playing = False
        if not circle and playing:
            print("Xs Turn")
            win = list(minimax.minimax(list(win), movefirst))
            if win[0][0] == second:
                pygame.draw.line(tic, red, (a[0] - xch, a[1] - xch), (a[0] + xch, a[1] + xch), 7)
                pygame.draw.line(tic, red, (a[0] - xch, a[1] + xch), (a[0] + xch, a[1] - xch), 7)
                print("A")
            if win[1][0] == second:
                pygame.draw.line(tic, red, (b[0] - xch, b[1] - xch), (b[0] + xch, b[1] + xch), 7)
                pygame.draw.line(tic, red, (b[0] - xch, b[1] + xch), (b[0] + xch, b[1] - xch), 7)
                print("B")
            if win[2][0] == second:
                pygame.draw.line(tic, red, (c[0] - xch, c[1] - xch), (c[0] + xch, c[1] + xch), 7)
                pygame.draw.line(tic, red, (c[0] - xch, c[1] + xch), (c[0] + xch, c[1] - xch), 7)
                print("C")
## Rich's playing around with functions....
# game2.play(othello.game(), game2.player(lambda x: minimax.minimax(x, 3)), game2.player(lambda x: random_move_policy(x)), True)
# t = UCT_tree.Tree(5, random_policy, 1)
# game2.play(othello.game(), game2.player(lambda x: minimax.minimax(x, 3)), game2.player(t.policy), True)
# game2.play(othello.game(), game2.player(t.policy), game2.player(lambda x: minimax.minimax(x, 4)), True)

policies = {"random": random_policy}  # , "greedy": greedy_policy}
budgets = [1]
c_vals = [1, 5]  # , 20, 50]
# opponents = {"random": game2.player(random_policy),
#              "greedy": game2.player(greedy_policy),
#              "minimax-2": game2.player(lambda x: minimax.minimax(x, 2)),
#              "minimax-3": game2.player(lambda x: minimax.minimax(x, 3)),
#              "minimax-4": game2.player(lambda x: minimax.minimax(x, 4))}
opponents = {"minimax-4": game2.player(lambda x: minimax.minimax(x, 4))}

# number of games going first/second with each configuration
n = 10

# Run this algorithm with different hard-coded sizes: for each policy, for
# each budget (i.e. 1, 2, 5 seconds), for each opponent, run n trials with
# us first and n trials with them first.
for pol_key in policies:
    for b in budgets:
        for c in c_vals:
            for opp_key in opponents:
                t = UCT_tree.Tree(b, policies[pol_key], c)
                uct_player = game2.player(t.policy)
def computerMove(self):
    index = minimax.minimax(game, game.turn)
    game.makeMove(index)
runs = 10
steps = np.zeros((episodes))
for i in range(runs):
    print "run", i
    steps += Qlearning(episodes, [(0, 0)], epsGreedyPolicy,
                       alpha_pred=0.2, alpha_prey=0.2)[0]
steps /= runs
steps = ndimage.filters.gaussian_filter(steps, 4)
pl.plot(range(len(steps)), steps)

steps = np.zeros((episodes))
for i in range(runs):
    print "run", i
    steps += minimax(episodes, [(0, 0)], 0.1, 0.99999, 0.9)[0]
steps /= runs
steps = ndimage.filters.gaussian_filter(steps, 4)
pl.plot(range(len(steps)), steps)

pl.legend(["Q-learning", "Minimax"])
pl.title('Number of iterations per episode using Minimax and Q-learning.')
pl.ylabel('Iterations')
pl.xlabel('Episodes')
pl.savefig("plots/equalminimax.png")
pl.ylim(0, 200)
pl.savefig("plots/equalminimaxzoom.png")
pl.close()
""" if game_elem==-1: return 2 elif game_elem==1: return 0 else: return 1 if __name__ == "__main__": """ Creates a main player """ nTuplesSystematicObject = nTuplesSystematic() # nTuplesSystematic - Black # Minimax - White # game2.play(othello.game(),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player(lambda x: minimax.minimax(x, 0)), False) # Minimax - Black # nTuplesSystematic - White # game2.play(othello.game(), game2.player(lambda x: minimax.minimax(x, 0)),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)),False) # nTuplesSystematic - Black # Minimax Edge Eval - White # game2.play(othello.game(),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player(lambda x: minimax.minimax(x, 0,othello.edge_eval)), True) # Minimax Edge Eval - Black # nTuplesSystematic - White game2.play(othello.game(), game2.player(lambda x: minimax.minimax(x, 0,othello.edge_eval)),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), True)
def calculate_move(self, board, whites_turn, depth):
    value, move = minimax.minimax(board, None, depth, whites_turn,
                                  evaluator.evaluate_advanced)
    return move
def makeComputerReply():
    UpdateThePointMatrices()
    move = minimax.minimax(COMPUTER, M, PW, PB, MAXDEPTH)
    print(move.shape)
    makeMove(move[0], move[1], LocateTurnedPieces(move[0], move[1], COMPUTER, M), COMPUTER)
    displayAllLegalMovesForHumanPlayer('BLUE')
def play(self, game, opp_move):
    return self.play_fn(game)

def gameover(self, game, last_move):
    pass


if __name__ == "__main__":
    import othello
    import minimax

    # Experiment 1:
    # Player 1 and Player 2 are evenly matched with 3-ply deep search.
    # Player 2 wins with a final score of 28.
    # Player 1: 0.2 s per ply, Player 2: 0.4 s per ply.
    play(othello.game(),
         player(lambda x: minimax.minimax(x, 3)),
         player(lambda x: minimax.minimax(x, 3)),
         True)

    # Experiment 2:
    # Now we show the significance of an evaluation function. We weaken
    # player 1 to a 2-ply deep search but use the edge evaluation function;
    # player 1 now beats player 2 with a score of 58!
    # Player 1: 0.1 s per ply, Player 2: 0.4 s per ply.
    play(othello.game(),
         player(lambda x: minimax.minimax(x, 2, othello.edge_eval)),
         player(lambda x: minimax.minimax(x, 3)),
         False)

    # Experiment 1 (with alpha-beta):
    # Player 1: 0.1 s per ply, Player 2: 0.1 s per ply.
    play(othello.game(),
         player(lambda x: minimax.alphabeta(x, 3)),
         player(lambda x: minimax.alphabeta(x, 3)),
         False)
def main():
    global m, n, surface

    # start screen
    font = pygame.font.Font('Font.ttf', 72)
    text = font.render("Red", 1, (255, 0, 0))
    textpos = text.get_rect(centerx=25 * n, centery=12 * m)
    surface.blit(text, textpos)
    text = font.render("Green", 1, (0, 255, 0))
    textpos = text.get_rect(centerx=25 * n, centery=36 * m)
    surface.blit(text, textpos)
    font = pygame.font.Font('Font.ttf', 12)
    text = font.render("Choose a Color", 1, (100, 100, 100))
    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
    surface.blit(text, textpos)
    pygame.display.update()
    this_loop = True
    while this_loop:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                y = pygame.mouse.get_pos()[1]
                if y < 25 * m:
                    player_first = True
                else:
                    player_first = False
                this_loop = False

    # depth screen
    surface.fill((0, 0, 0))
    font = pygame.font.Font('Font.ttf', 12)
    text = font.render("How deep should I look?", 1, (100, 100, 100))
    textpos = text.get_rect(centerx=25 * n, centery=12 * m)
    surface.blit(text, textpos)
    font = pygame.font.Font('Font.ttf', 48)
    depth = 3
    text = font.render(str(depth), 1, (255, 255, 0))
    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
    surface.blit(text, textpos)
    pygame.display.update()
    this_loop = True
    while this_loop:
        for event in pygame.event.get():
            if (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN) or event.type == pygame.MOUSEBUTTONDOWN:
                this_loop = False
            elif event.type == pygame.KEYDOWN:
                if event.key < 256 and chr(event.key) in '1234567890':
                    rect = pygame.Rect(12 * m, 25 * n, 100, 100)
                    pygame.draw.rect(surface, (0, 0, 0), rect, 0)
                    pygame.display.update()
                    depth = int(chr(event.key))
                    text = font.render(str(depth), 1, (255, 255, 0))
                    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
                    surface.blit(text, textpos)
                    pygame.display.update()

    # rows screen
    surface.fill((0, 0, 0))
    font = pygame.font.Font('Font.ttf', 12)
    text = font.render("How many rows?", 1, (100, 100, 100))
    textpos = text.get_rect(centerx=25 * n, centery=12 * m)
    surface.blit(text, textpos)
    font = pygame.font.Font('Font.ttf', 48)
    rows = 9
    text = font.render(str(rows), 1, (255, 255, 0))
    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
    surface.blit(text, textpos)
    pygame.display.update()
    this_loop = True
    while this_loop:
        for event in pygame.event.get():
            if (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN) or event.type == pygame.MOUSEBUTTONDOWN:
                this_loop = False
            elif event.type == pygame.KEYDOWN:
                if event.key < 256 and chr(event.key) in '1234567890':
                    rect = pygame.Rect(12 * m, 25 * n, 100, 100)
                    pygame.draw.rect(surface, (0, 0, 0), rect, 0)
                    pygame.display.update()
                    rows = int(chr(event.key))
                    text = font.render(str(rows), 1, (255, 255, 0))
                    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
                    surface.blit(text, textpos)
                    pygame.display.update()

    # columns screen
    surface.fill((0, 0, 0))
    font = pygame.font.Font('Font.ttf', 12)
    text = font.render("How many columns?", 1, (100, 100, 100))
    textpos = text.get_rect(centerx=25 * n, centery=12 * m)
    surface.blit(text, textpos)
    font = pygame.font.Font('Font.ttf', 48)
    columns = 6
    text = font.render(str(columns), 1, (255, 255, 0))
    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
    surface.blit(text, textpos)
    pygame.display.update()
    this_loop = True
    while this_loop:
        for event in pygame.event.get():
            if (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN) or event.type == pygame.MOUSEBUTTONDOWN:
                this_loop = False
            elif event.type == pygame.KEYDOWN:
                if event.key < 256 and chr(event.key) in '1234567890':
                    rect = pygame.Rect(12 * m, 25 * n, 100, 100)
                    pygame.draw.rect(surface, (0, 0, 0), rect, 0)
                    pygame.display.update()
                    columns = int(chr(event.key))
                    text = font.render(str(columns), 1, (255, 255, 0))
                    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
                    surface.blit(text, textpos)
                    pygame.display.update()

    # some initialization code
    m, n = rows, columns
    surface = pygame.display.set_mode((50 * n, 50 * m))
    pygame.display.set_caption('Chain Reaction')
    board = structure.Board(m=m, n=n)
    total_moves = 0

    # game screen
    drawBoard(board)
    if not player_first:
        new_move = minimax.minimax(board)[0]
        lock.acquire()
        thread.start_new_thread(slowMove, (board, new_move))
        board = structure.move(board, new_move)
        total_moves += 1
    this_loop = True
    while this_loop:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                x, y = x / 50, y / 50
                if not (board.new_move == structure.sgn(board[(y, x)]) or 0 == structure.sgn(board[(y, x)])):
                    print "Illegal Move!"
                    continue
                show_move((y, x))
                lock.acquire()
                thread.start_new_thread(slowMove, (board, (y, x)))
                board = structure.move(board, (y, x))
                total_moves += 1
                if total_moves >= 2:
                    if structure.score(board, board.new_move * (-1)) == 10000:
                        winner = board.new_move * (-1)
                        this_loop = False
                        break
                new_move = minimax.minimax(board, depth)[0]
                show_move(new_move)
                lock.acquire()
                thread.start_new_thread(slowMove, (board, new_move))
                board = structure.move(board, new_move)
                total_moves += 1
                if total_moves >= 2:
                    if structure.score(board, board.new_move * (-1)) == 10000:
                        winner = board.new_move * (-1)
                        this_loop = False
                        break

    # winning screen
    while lock.locked():
        continue
    m, n = 9, 6
    surface = pygame.display.set_mode((50 * n, 50 * m))
    font = pygame.font.Font('Font.ttf', 72)
    pygame.display.set_caption('Chain Reaction')
    if winner == 1:
        text = font.render("Red", 1, (255, 0, 0))
    else:
        text = font.render("Green", 1, (0, 255, 0))
    textpos = text.get_rect(centerx=25 * n, centery=12 * m)
    surface.blit(text, textpos)
    font = pygame.font.Font('Font.ttf', 48)
    text = font.render("Wins!", 1, (100, 100, 100))
    textpos = text.get_rect(centerx=25 * n, centery=25 * m)
    surface.blit(text, textpos)
    pygame.display.update()
def play_game(game, heuristic1, heuristic2):
    while not end_game(game.board):
        turn = game.turn.color
        board = game.board
        if has_valid_position(board, turn):
            minimax.PLAYER = turn
            if turn == "B":
                position = minimax.minimax(board, 1, turn, heuristic1)[1]
            else:
                position = minimax.minimax(board, 1, turn, heuristic2)[1]
            game.play(position)
        else:
            game.change_turn()
    return game.winning_side(formatted=False)
def main():
    print "Third question"
    tabuleiro = [' '] * 9
    raiz = arvore.No(tabuleiro)
    print " "
    arvore.geraArvore(tabuleiro, raiz)
    print "Below is the tree, with root, child, and grandchild nodes"
    print " "
    arvore.imprimirArvore(raiz)
    print " "
    print "Evaluation of the leaves"
    arvore.avaliarArvore(raiz)
    print " "
    minimax_value = minimax(raiz, 2, True)
    print 'Minimax best value: ' + str(minimax_value)
    for i, filho in enumerate(raiz.filhos):
        if filho.avaliacao == minimax_value:
            print 'Best move goes to:'
            filho.imprimir()
            break