def main(in1, in2):
    in1 = str(in1)
    in2 = str(in2)
    p1_dict = {"human": players.HumanPlayer("Team Jimmy", 1),
               "random": players.RandomPlayer(1),
               "mini_easy": players.MinimaxPlayer(1, 3),
               "mini_medium": players.MinimaxPlayer(1, 5),
               "mini_hard": players.MinimaxPlayer(1, 6),
               "net_random": players.NetPlayer(1, "Random"),
               "net_easy": players.NetPlayer(1, "Easy"),
               "net_medium": players.NetPlayer(1, "Medium"),
               "net_hard": players.NetPlayer(1, "Hard")}
    # '\u0336' is a combining strikethrough, so player 2's name renders as a
    # crossed-out "second place".
    result = ""
    h2_name = "second place"
    for c in h2_name:
        result = result + c + '\u0336'
    p2_dict = {"human": players.HumanPlayer("Team " + result + " Ben", 2),
               "random": players.RandomPlayer(2),
               "mini_easy": players.MinimaxPlayer(2, 3),
               "mini_medium": players.MinimaxPlayer(2, 5),
               "mini_hard": players.MinimaxPlayer(2, 6),
               "net_random": players.NetPlayer(2, "Random"),
               "net_easy": players.NetPlayer(2, "Easy"),
               "net_medium": players.NetPlayer(2, "Medium"),
               "net_hard": players.NetPlayer(2, "Hard")}
    player1 = p1_dict[in1]
    player2 = p2_dict[in2]
    game_board = GameBoard([player1, player2])
    game_board.game_loop()
    # there has to be a better way to do this
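The trailing comment suggests the author wanted a leaner way to build the two player tables. A minimal sketch of one alternative, assuming the same `players` constructors and key strings used above; the `PLAYER_FACTORIES` table and `make_player` helper are hypothetical names, not part of the original code:

# Hypothetical sketch: construct only the selected player, lazily.
# Assumes the constructor signatures shown in main() above.
PLAYER_FACTORIES = {
    "human": lambda num: players.HumanPlayer("Team Jimmy" if num == 1 else "Team Ben", num),
    "random": lambda num: players.RandomPlayer(num),
    "mini_easy": lambda num: players.MinimaxPlayer(num, 3),
    "mini_medium": lambda num: players.MinimaxPlayer(num, 5),
    "mini_hard": lambda num: players.MinimaxPlayer(num, 6),
    "net_random": lambda num: players.NetPlayer(num, "Random"),
    "net_easy": lambda num: players.NetPlayer(num, "Easy"),
    "net_medium": lambda num: players.NetPlayer(num, "Medium"),
    "net_hard": lambda num: players.NetPlayer(num, "Hard"),
}

def make_player(kind, num):
    # Look up the constructor for the requested type and build only that player.
    return PLAYER_FACTORIES[str(kind)](num)

With such a table, main() would reduce to two make_player calls followed by the existing GameBoard setup.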
def __init__(self, screen):
    """Init Game with all needed attributes."""
    self.screen = screen
    self.gui = gui.Gui(self.screen)
    self.gui.draw_background()
    self.game_running = True
    self.game_move_number = 0
    self.game_board = check_board_state.create_board()
    self.game_arbiter = check_board_state.CheckBoardState(
        self.screen, self.game_board)
    self.played_moves = []
    self.player1 = players.HumanPlayer(constants.PLAYER1_NAME,
                                       constants.WHITE)
    self.player2 = players.AiPlayer(self.screen, self.game_board,
                                    constants.BLACK)
    self.player_on_move = self.player1
    self.game_mode = constants.STANDARD
    self.gui_board = [[
        gui.Square(self.screen) for i in range(constants.BOARD_SIZE)
    ] for j in range(constants.BOARD_SIZE)]
    self.gui_on_move = gui.OnMove(self.screen)
    self.button_new_game = gui.ButtonRightMenu(self.screen, 0,
                                               constants.RESTART)
    self.button_menu = gui.ButtonRightMenu(self.screen, 1, constants.MENU)
    self.button_white_stone = gui.ButtonChooseColor(self.screen, 0)
    self.button_black_stone = gui.ButtonChooseColor(self.screen, 1)
    self.button_ai_opponent = gui.ButtonChooseOpponent(self.screen, 0)
    self.button_ai_player = gui.ButtonChooseOpponent(self.screen, 1)
    self.button_standard_game_mode = gui.ButtonChooseMode(self.screen, 0)
    self.button_swap2_game_mode = gui.ButtonChooseMode(self.screen, 1)
    self.last_move = LastMove
def __init__(self, master, n, difficulty, params, player2starts):
    self.state = state.State(n, difficulty)
    self.difficulty = difficulty
    self.n = n
    # ----GUI members----- #
    self.frame = Frame(master)
    self.frame.pack(fill="both", expand=True)
    self.canvas = Canvas(self.frame, width=100 * n, height=100 * n)
    self.canvas.pack(fill="both", expand=True)
    self.label = Label(self.frame, text='Tic Tac Toe Game', height=6,
                       bg='black', fg='blue')
    self.label.pack(fill="both", expand=True)
    self.frameb = Frame(self.frame)
    self.frameb.pack(fill="both", expand=True)
    self.Start = Button(self.frameb, text='Click here to start playing',
                        height=4, command=self.start, bg='purple', fg='white')
    self.Start.pack(fill="both", expand=True, side=LEFT)
    self.AI_TIMES = []  # for performance logging
    # ---------------------- #
    if params[1] == 'H':
        self.player1 = players.HumanPlayer(params[0], params[2])
    else:
        self.player1 = players.AIPlayer(params[2], self.difficulty)
    if params[2] == 'X':
        player2token = 'O'
    else:
        player2token = 'X'
    if params[4] == 'H':
        self.player2 = players.HumanPlayer(params[3], player2token)
    else:
        self.player2 = players.AIPlayer(player2token, self.difficulty)
    # If False, player 1 starts; if True, player 2 starts.
    # Needed for replaying the game.
    self.whostarts = player2starts
    if self.whostarts:
        self.curr_player = self.player2
    else:
        self.curr_player = self.player1
def menu(self):
    """Set up the on-screen menu where the player chooses game options before the game starts."""
    self.gui_on_move.white(self.player_on_move.name)
    self.button_white_stone.white(selected=True)
    self.button_black_stone.black()
    self.button_ai_opponent.AI(selected=True)
    self.button_ai_player.player()
    self.button_standard_game_mode.standard(selected=True)
    self.button_swap2_game_mode.swap2()
    pygame.display.update()
    while self.game_running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                if self.button_new_game.graphic.collidepoint(pos):
                    self.game_running = False
                    return GameSettings(player1=self.player1,
                                        player2=self.player2,
                                        player_on_move=self.player_on_move,
                                        game_mode=self.game_mode)
                if self.button_white_stone.graphic.collidepoint(pos):
                    self.player_on_move = self.player1
                    self.gui_on_move.white(self.player1.name)
                    self.button_white_stone.white(selected=True)
                    self.button_black_stone.black(selected=False)
                if self.button_black_stone.graphic.collidepoint(pos):
                    self.player_on_move = self.player2
                    self.gui_on_move.black(self.player2.name)
                    self.button_white_stone.white(selected=False)
                    self.button_black_stone.black(selected=True)
                if self.button_ai_opponent.graphic.collidepoint(pos):
                    self.player2 = players.AiPlayer(
                        self.screen, self.game_board, self.player2.stone_color)
                    if self.player_on_move.stone_color == self.player2.stone_color:
                        self.player_on_move = self.player2
                        self.gui_on_move.black(self.player2.name)
                    self.button_ai_opponent.AI(selected=True)
                    self.button_ai_player.player(selected=False)
                if self.button_ai_player.graphic.collidepoint(pos):
                    self.player2 = players.HumanPlayer(
                        constants.PLAYER2_NAME, self.player2.stone_color)
                    if self.player_on_move.stone_color == self.player2.stone_color:
                        self.player_on_move = self.player2
                        self.gui_on_move.black(self.player2.name)
                    self.button_ai_opponent.AI(selected=False)
                    self.button_ai_player.player(selected=True)
                if self.button_standard_game_mode.graphic.collidepoint(pos):
                    self.game_mode = constants.STANDARD
                    self.button_standard_game_mode.standard(selected=True)
                    self.button_swap2_game_mode.swap2(selected=False)
                if self.button_swap2_game_mode.graphic.collidepoint(pos):
                    self.game_mode = constants.SWAP2
                    self.button_standard_game_mode.standard()
                    self.button_swap2_game_mode.swap2(selected=True)
        pygame.display.update()
async def handle_new_game(request):
    global newnet, games
    if newnet is None:
        newnet = get_net()
    game_id = str(uuid.uuid4())
    human_player = players.HumanPlayer('b', game_id)
    asyncio.get_event_loop().create_task(new_game(newnet, human_player))
    games[game_id] = human_player
    return web.json_response(await human_player.queue_tx.get(),
                             headers=COMMON_HEADERS)
def basic_game():
    # black = RandomPlayer()
    # white = CapturePlayer()
    white = players.HumanPlayer()
    black = players.BetterMinMaxPlayer(search_depth=3)
    # black = players.BasicMinMaxPlayer(search_depth=1)
    # black = players.HumanPlayer()
    result = game_loop(white, black, wait=0.01)
    if result == "1-0":
        print("White wins")
    if result == "0-1":
        print("Black wins")
    if result == "1/2-1/2":
        print("Draw")
def main():
    playerNum = int(raw_input("Enter number of players: "))
    playerList = []
    playersAdded = 0
    humanInvolved = False
    while playersAdded < playerNum:
        playersAdded += 1
        playerType = raw_input("Enter player type [human/random/smartv1]: ")
        if playerType == 'human':
            humanInvolved = True
            p = players.HumanPlayer(playersAdded)
            playerList.append(p)
        elif playerType == 'random':
            p = players.RandomBot(playersAdded)
            playerList.append(p)
        elif playerType == 'smartv1':
            p = players.SmartBotV1(playersAdded)
            playerList.append(p)
        else:
            playersAdded -= 1
            print "Incorrect player type"
    if not humanInvolved:
        sim = ('y' == raw_input(
            "No humans are playing. Running simulation? (y/n) "))
        if sim:
            trialCount = int(raw_input("Enter number of trials: "))
            winnerCounts = [0 for x in playerList]
            count = 0
            while count < trialCount:
                colorDict = retrieveColorDict(autoRandom=True)
                G = Game(colorDict, playerList)
                winnerCounts[G.playGame(printMode=False) - 1] += 1
                count += 1
            print winnerCounts
    else:
        colorDict = retrieveColorDict()
        G = Game(colorDict, playerList)
        G.playGame()
def __init__(self, src_dir):
    resource_mgr = resource.Resource()
    resource_mgr.load(src_dir)
    self._graphic_mgr = graphics.Graphics(resource_mgr)
    self._last_tick = 0
    self._current_turn = 0
    self._last_tile = None
    self._all_tiles = []
    self._clock = pygame.time.Clock()
    self._players = [
        players.HumanPlayer(self._graphic_mgr,
                            players.Player.POSITION.SOUTH, u'南大'),
        players.AIPlayer(self._graphic_mgr,
                         players.Player.POSITION.EAST, u'东大'),
        players.AIPlayer(self._graphic_mgr,
                         players.Player.POSITION.NORTH, u'北大'),
        players.AIPlayer(self._graphic_mgr,
                         players.Player.POSITION.WEST, u'西大')
    ]
    self._graphic_mgr.catch_players(self._players)
    self._graphic_mgr.clock = self._clock
    self._can_draw = False
    self._cache_text = None
    self.reset()
def main():
    kp = Kakerlakenpoker()
    kp.reset()
    human_player = players.HumanPlayer()
    p1_rndact = players.RandomPlayer(kp, PLAYER1)

    # Set up the Q-function and optimizer (offence)
    off_q_func = qf.QFunction(32, 64)
    # q_func.to_gpu(0)
    off_optimizer = chainer.optimizers.Adam(eps=1e-2)
    off_optimizer.setup(off_q_func)
    gamma = 0.95
    # Epsilon-greedy exploration; epsilon decays to end_epsilon over 50000 steps
    off_explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(
        start_epsilon=1.0, end_epsilon=0.3, decay_steps=50000,
        random_action_func=p1_rndact.random_offence_action_func)
    # Replay buffer for experience replay, the training technique used by DQN
    off_replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=10**6)
    urayama_offence = chainerrl.agents.DoubleDQN(
        off_q_func, off_optimizer, off_replay_buffer, gamma, off_explorer,
        replay_start_size=500, target_update_interval=100)

    # Set up the Q-function and optimizer (defence)
    def_q_func = qf.QFunction(40, 2)
    # q_func.to_gpu(0)
    def_optimizer = chainer.optimizers.Adam(eps=1e-2)
    def_optimizer.setup(def_q_func)
    def_explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(
        start_epsilon=1.0, end_epsilon=0.3, decay_steps=50000,
        random_action_func=p1_rndact.random_defence_action_func)
    # Replay buffer for experience replay, the training technique used by DQN
    def_replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=10**6)
    urayama_defence = chainerrl.agents.DoubleDQN(
        def_q_func, def_optimizer, def_replay_buffer, gamma, def_explorer,
        replay_start_size=500, target_update_interval=100)

    # chainerrl.agent.load_npz_no_strict("offence_model3000", urayama_offence)
    # chainerrl.agent.load_npz_no_strict("defence_model3000", urayama_defence)
    urayama_offence.load("offence_model3000")
    urayama_defence.load("defence_model3000")

    offence_act = [urayama_offence.act, human_player.offence_act]
    defence_act = [urayama_defence.act, human_player.defence_act]
    turn = PLAYER1  # PLAYER1 is urayama (the agent), PLAYER2 is the human
    turn_count = 1
    while not kp.done:
        print("***Turn", str(turn_count), "***")
        kp.show_vs_URAYAMA()
        off_act = offence_act[turn](kp.get_env().copy())
        off_act_vec = np.zeros(8, dtype=np.float32)
        off_act_vec[off_act % 8] = 1
        if turn == PLAYER1:
            print("URAYAMA declare:" + str(off_act % 8))
        else:
            print("Player declare:" + str(off_act % 8))
        def_act = defence_act[PLAYER2 - turn](np.append(
            kp.get_env().copy(), off_act_vec))
        ans = "True" if def_act == 1 else "Lie"
        if turn == PLAYER1:
            print("Player answer:" + ans)
        else:
            print("URAYAMA answer:" + ans)
        is_turn_change = kp.step(off_act, def_act, turn)
        kp.check_winner()
        if kp.done is True:
            if kp.winner == 1:
                print("URAYAMA win")
            elif kp.winner == -1:
                print("YOU win")
            else:
                print("Error")
        if kp.miss is True:
            print("MISS")
        if is_turn_change:
            turn = PLAYER1 if turn == PLAYER2 else PLAYER2  # switch turns
        turn_count += 1
def __init__(self, player_name):
    self.players = [players.HumanPlayer(player_name), players.AIPlayer()]
    self.board = board.Board([p.name for p in self.players])
    self.player_turn_index = random.randrange(0, 2)
    # Counts both players' turns, i.e. each single player's move counts as one turn.
    self.turn_count = 1
if winner == 0:
    player1.win_update(board)
    player2.loss_update(board)
elif winner == 1:
    player1.loss_update(board)
    player2.win_update(board)
elif winner == -1:
    player1.draw_update(board)
    player2.win_update(board)  # note: draws are credited as wins for player 2

# test finding optimal p2 strategy
wins.append(winner)

## Analyse the results
wins = np.array(wins)
p1_wins = np.cumsum(wins == 0)
p2_wins = np.cumsum(wins == 1)
draws = np.cumsum(wins == -1)

fig, axes = plt.subplots(1)
axes.plot(p1_wins, c='r')
axes.plot(p2_wins, c='b')
axes.plot(draws, c='k')
plt.show()

## Now let the human play!
board.set_state(init_state)
human = players.HumanPlayer('x')
game = Game(board, player1, human)
game.play()
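A small optional addition, not from the source: labeling the three cumulative curves in the plotting section above makes the figure easier to read. This sketch assumes the same encoding of `wins` (0 for a player 1 win, 1 for a player 2 win, -1 for a draw):

# Hypothetical labeled version of the results plot above.
fig, axes = plt.subplots(1)
axes.plot(p1_wins, c='r', label='player 1 wins')
axes.plot(p2_wins, c='b', label='player 2 wins')
axes.plot(draws, c='k', label='draws')
axes.set_xlabel('game number')
axes.set_ylabel('cumulative count')
axes.legend()
plt.show()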
# Create board
board = Board()
board_dim = [0, 100, screen_width, screen_height - 100]

# Create network
net = nets.Connect4Network()
data = util.read_from_file("test.csv")
X, y = util.split_features_labels(data)
X, y = util.shuffle_data(X, y)
X = np.expand_dims(X, axis=1)
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
net.fit(X, y)

# Create players
player_1 = players.HumanPlayer("Rachael")
# player_1 = players.RandomPlayer("Bimbo")
# player_1 = players.MinimaxPlayer("Max", 8)
# player_2 = players.RandomPlayer("Bimbo")
player_2 = players.DeepMinimaxPlayer("Susan", net, 4)

# Create texts
game_text = "{} to move.".format(player_1.name)

# Begin game loop
last_frame_click = False
while True:
    # Check if exited
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        status_after_play = self.play(player)
        if status_after_play is not None:
            self.game_running = False
        self.switch_player()
        return status_after_play

    def get_board_values(self):
        return self.game_board.board_values.copy()


# Unit test
if __name__ == "__main__":
    player_1 = players.HumanPlayer()
    player_2 = players.MonteCarloPlayer()
    player_3 = players.RfPlayer(players.get_dqn(), epsilon=0.01)
    # data3 = np.array([[1, -1, 1], [1, -1, 1], [-1, 1, 0]])
    # test_board = TicTacToeBoard(data3)
    # game = TicTacToeGame(test_board)
    game = TicTacToeGame(TicTacToeBoard.create_empty_board())
    print("Game started")
    game_result = game.start_game(player_1, player_2)
    # Show final status of a board
    game.show_board()
    if game_result == CROSS:
def get_humans(self, num):
    humans = []
    for order in range(1, num + 1):
        name = input(f'Name for Player {order}: ')
        humans.append(players.HumanPlayer(order, name))
    return humans
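A hedged variant, not from the original project: the same prompt loop, but re-asking when a name is left blank. It assumes the `players.HumanPlayer(order, name)` signature used above; `get_humans_validated` is a hypothetical name:

def get_humans_validated(self, num):
    # Same loop as get_humans(), but keep prompting until a non-empty name is entered.
    humans = []
    for order in range(1, num + 1):
        name = ""
        while not name.strip():
            name = input(f'Name for Player {order}: ')
        humans.append(players.HumanPlayer(order, name))
    return humans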