def __init__(self, multiplayer=False):
    """Set up the grid and the two players.

    :param multiplayer: when True both players are human; otherwise
        player 1 is controlled by the computer.
    """
    self.grid = Grid()
    # Create the player list on the instance before indexing into it.
    # The original assigned to self.players[0] without initializing
    # self.players, which fails (or silently shares a mutable class
    # attribute across instances).
    self.players = [None, None]
    self.players[0] = players.Human(self.grid, 0)
    if multiplayer:
        self.players[1] = players.Human(self.grid, 1)
    else:
        self.players[1] = players.Computer(self.grid, 1)
def test_display_board():
    """Construct a classic board for two human players and render it."""
    # Banner previously said "Test Update Pegs" — a copy/paste from the
    # update-pegs test; corrected to match what this test exercises.
    print('\n--------- Test Display Board -------------\n')
    u1 = users.User(username='******', email='*****@*****.**')
    u2 = users.User(username='******', email='*****@*****.**')
    p1 = players.Human(name='Jon', user=u1, lane=1)
    p2 = players.Human(name='Rick', user=u2, lane=2)
    b = board.Classic(player_one=p1, player_two=p2)
    b.display_board()
def __init__(self):
    """Initialize a fresh, unfinished game between two human players."""
    # Bookkeeping and status flags first.
    self.turn = 1
    self.status = 'UNFINISHED'
    self.moves = []
    self.check = False
    self.check_mate = False
    # Board and participants.
    self.board = Board()
    self.players = [players.Human(index) for index in range(2)]
    # Set up the opening move.
    self.__prep_move()
def _print_deck(d):
    """Print each card in deck *d* as a 1-based numbered list."""
    # enumerate(start=1) replaces the original hand-maintained counter.
    for i, card in enumerate(d.deck, start=1):
        print(f'{i}) {card.name}')


def test_cut_deck():
    """Exercise cut_deck() for a Computer and then a Human player,
    printing the deck order before and after each cut."""
    d = deck.Deck()
    c = players.Computer(difficulty='easy')
    print(f"\n--------- TEST cut_deck() method computer -------------\n")
    print(f'\n-- before cut --\n')
    _print_deck(d)
    c.cut_deck(d)
    print(f'\n-- after cut --\n')
    _print_deck(d)
    # Fresh deck for the human's cut.
    d = deck.Deck()
    username = '******'
    email = '*****@*****.**'
    u = users.User(username=username, email=email)
    h = players.Human(user=u)
    print(f"\n--------- TEST cut_deck() method human -------------\n")
    print(f'\n-- before cut --\n')
    _print_deck(d)
    h.cut_deck(d)
    print(f'\n-- after cut --\n')
    _print_deck(d)
def test_match_human():
    """Run a full game: a signed-in human versus a hard computer opponent."""
    human = players.Human(user=users.sign_in())
    bot = players.Computer(difficulty='hard')
    games.Cribbage(human, bot).game_driver()
def __init__(self, resolution):
    """Wire up the graphics engine, game state, and default players.

    :param resolution: window resolution handed to the gfx engine.
    """
    self.resolution = resolution
    self.engine = engine.GfxEngine(resolution)
    self.game_state = state.GameState([])
    self.history = []
    # Default match-up: human as white against a "random" AI as black.
    white = players.Human("white")
    black = players.AI("black", "random")
    self.player_list = [white, black]
def test_Classic_constructor():
    """Build a classic board (human vs. hard computer) and display it."""
    # Banner corrected: this test exercises the Classic constructor, not
    # a generic Board one. Commented-out human-vs-human variant and its
    # unused second user were removed.
    print('\n--------- Test Classic constructor -------------\n')
    u1 = users.User(username='******', email='*****@*****.**')
    p1 = players.Human(name='Jon', user=u1, lane=1)
    p2 = players.Computer(difficulty='hard', lane=2)
    b = board.Classic(player_one=p1, player_two=p2)
    print(f'- Player 1 name: {b.player_one.name}')
    print(f'- Player 2 name: {b.player_two.name}\n')
    b.display_board()
def test_human():
    """Construct a Human player from a User and print its attributes."""
    account = users.User(username='******', email='*****@*****.**')
    h = players.Human(user=account)
    print("\n--------- TEST human constructor -------------\n")
    print(f'--Human name: {h.name}')
    print(f'--Human score: {str(h.score)}')
    print(f'--Human cards: {h.cards}')
    print(f'--Human is_dealer: {h.is_dealer}')
    print(f'--Human user match stats: {h.user.match_stats}')
def play(model_path, is_max_entropy):
    """
    Play a game against a model
    :param model_path: String. Path to the model
    :param is_max_entropy: Boolean. Does the model uses entropy maximization
    """
    random.seed(int(time()))
    # Constructor hyper-parameters come from module-level globals; they
    # presumably must match the architecture of the checkpoint restored
    # below — TODO confirm.
    p1 = players.QPlayer(hidden_layers_size=layers_size,
                         learning_batch_size=batch_size, gamma=gamma, tau=tau,
                         batches_to_q_target_switch=batches_to_q_target_switch,
                         memory_size=memory_size, session=tf.Session(),
                         maximize_entropy=is_max_entropy)
    p1.restore(model_path)
    p2 = players.Human()
    # Single game; g's parity decides which side the computer plays.
    for g in range(1):
        print('STARTING NEW GAME (#{})\n-------------'.format(g))
        if g % 2 == 0:
            game = Game(p1, p2)
            print("Computer is X (1)")
        else:
            game = Game(p2, p1)
            print("Computer is O (-1)")
        while not game.game_status()['game_over']:
            # NOTE(review): the field is printed here for a human turn and
            # again unconditionally below — looks like a duplicate; confirm.
            if isinstance(game.active_player, players.Human):
                game.print_field()
            print("{}'s turn:".format(game.current_player))
            game.print_field()
            state = np.copy(game.board)
            # Force Q-Network to select different starting positions if it plays first
            action = int(
                game.active_player.select_cell(state, epsilon=0.0)
            ) if np.count_nonzero(game.board) > 0 or not isinstance(
                game.active_player, players.QPlayer) else random.randint(
                0, 399)
            print(game.current_player, action)
            game.play(action)
            if not game.game_status()['game_over']:
                game.next_player()
            # Abort the game loop on an invalid move.
            if game._invalid_move_played:
                print("*")
                break
        print('-------------\nGAME OVER!')
        game.print_board()
        print(game.game_status())
        print('-------------')
def test_update_pegs():
    """Drive update_pegs() through several score changes, displaying the
    board and the resulting peg positions."""
    print('\n--------- Test Update Pegs -------------\n')
    u1 = users.User(username='******', email='*****@*****.**')
    u2 = users.User(username='******', email='*****@*****.**')
    p1 = players.Human(name='Jon', user=u1, lane=1)
    p2 = players.Human(name='Rick', user=u2, lane=2)
    b = board.Classic(player_one=p1, player_two=p2)
    # Bump each player's score in turn, syncing pegs after every change.
    for score_one, score_two in ((15, 25), (35, 40)):
        p1.score = score_one
        b.update_pegs()
        p2.score = score_two
        b.update_pegs()
        b.display_board()
    print(f'Player one: {b.player_one.score}')
    print(f'Player two: {b.player_two.score}')
    print(f'lane1 lead: {b.lane1_lead_peg}')
    print(f'lane1 hind: {b.lane1_hind_peg}')
    print(f'lane2 lead: {b.lane2_lead_peg}')
    print(f'lane2 hind: {b.lane2_hind_peg}')
def main():
    """Run showdown rounds until either side reaches 10 points, then
    announce the winner."""
    human = players.Human()
    computer = players.Computer()
    winning_score = 10
    while human.score < winning_score and computer.score < winning_score:
        print('Human: {} ... '
              'Computer: {}'.format(human.score, computer.score))
        print('====================================================')
        showdown(human.user_output(), computer.get_random(), human, computer)
    # Report the result (ties fall through to the computer's message).
    if human.score > computer.score:
        print('Human wins by score '
              'of {} to {}'.format(human.score, computer.score))
    else:
        print('Computer wins by score '
              'of {} to {}'.format(computer.score, human.score))
def __init__(self, master):
    """Hook up the game model, GUI, and players under the given *master*."""
    self.master = master
    self.history = []  # move history shown in the listbox
    self.game = chessgame.ChessGame()
    # Build the GUI and point its game display at our model.
    self.gui = gui.Gui(self)
    self.gui.gameDisplay.game = self.game
    # White is a human player; black is the AI opponent.
    self.players = [None, None]
    self.players[const.WHITE] = players.Human(self, const.WHITE)
    self.players[const.BLACK] = players.AI(self, const.BLACK)
    self.game.setBoard(mat.TESTBOARD)
    self.master.bind("<<turn_complete>>", self.turnComplete)
def __init__(self, typePlayer1, typePlayer2, numberOfRounds, verbose=False):
    """Create a match between two players.

    :param typePlayer1: "ai", "human", or "test" — kind of player 1.
    :param typePlayer2: same choices for player 2.
    :param numberOfRounds: number of rounds to play.
    :param verbose: when True, enable extra output.
    :raises ValueError: if either type string is not recognized.
    """
    self.numberOfRounds = numberOfRounds
    # Dispatch table replaces the two duplicated if/elif chains.
    factories = {
        "ai": players.AI,
        "human": players.Human,
        "test": players.Player,
    }
    # Validate/construct in the same order as the original so the same
    # error surfaces first.
    if typePlayer1 not in factories:
        raise ValueError("typePlayer1 not understood")
    self.player1 = factories[typePlayer1](1)
    if typePlayer2 not in factories:
        raise ValueError("typePlayer2 not understood")
    self.player2 = factories[typePlayer2](2)
    self.scores = [0, 0]
    self.verbose = verbose
def test_discard_human():
    """Deal a six-card hand to a human, discard two, and show the hand
    before and after plus the discard choices."""
    d = deck.Deck()
    d.shuffle()
    u = users.User(username='******', email='*****@*****.**')
    h = players.Human(user=u)
    # Deal six cards straight into the player's hand.
    h.cards = [d.deal_one() for _ in range(6)]
    print(f"\n--------- TEST discard() method human -------------\n")
    print('---hand before discards---')
    h.display_hand(is_numbered=True)
    discards = h.discard(2)
    print('---hand after discards---')
    h.display_hand(is_numbered=False)
    # Swap the discards in as the "hand" so they can be displayed too.
    h.cards = discards
    print('---Discard choices---')
    h.display_hand(is_numbered=False)
def play_in_env():
    """Load a trained A2C actor/critic pair and play one rendered test
    game in the environment against a self-play copy of the agent."""
    print("play in environment\n")
    actor = load_a_model('models/A2C/1591332024-1591336109_ep3300_actor.model')
    critic = load_a_model(
        'models/A2C/1591332024-1591336109_ep3300_critic.model')
    # The loaded models carry no hyper_dict; attach the output sizes the
    # downstream agent reads.
    critic.model.hyper_dict = {}
    actor.model.hyper_dict = {}
    critic.model.hyper_dict['output_num'] = 1
    actor.model.hyper_dict['output_num'] = 7
    p1 = players.A2CAgent(actor, critic, 0.99, enriched_features=True)
    # Dead `p2 = players.Human()` assignment removed — it was immediately
    # overwritten by the self-play opponent actually used.
    p2 = players.Selfplay(p1)
    p1.name = "A2C"
    p2.name = "selfplay"
    param = TrainingParameters()  # supplies the reward_dict
    env = environment(reward_dict=param.reward_dict)
    env.add_players(p1, p2)
    env.env_info()
    [rew, rew_p1, rew_p2], _ = env.test(render=True, visualize_layers=False)
    print(f"reward: {rew}")
    print(f"reward_p1: {rew_p1}")
    print(f"reward_p2: {rew_p2}")
    print(env.Winnerinfo())
def play():
    """Play four games of a restored Q-network against a human,
    alternating which side the computer plays each game."""
    random.seed(int(time()))
    # Constructor hyper-parameters presumably must match the architecture
    # of the checkpoint restored below — TODO confirm.
    p1 = players.QPlayer([100, 160, 160, 100], learning_batch_size=100,
                         gamma=0.95, tau=0.95, batches_to_q_target_switch=100,
                         memory_size=100000)
    p1.restore('./models/q.ckpt')
    p2 = players.Human()
    for g in range(4):
        print('STARTING NEW GAME (#{})\n-------------'.format(g))
        # Parity of g decides who moves first (and thus plays X).
        if g % 2 == 0:
            game = Game(p1, p2)
            print("Computer is X (1)")
        else:
            game = Game(p2, p1)
            print("Computer is O (-1)")
        while not game.game_status()['game_over']:
            # Show the board and prompt only when it is the human's turn.
            if isinstance(game.active_player(), players.Human):
                game.print_board()
                print("{}'s turn:".format(game.current_player))
            state = np.copy(game.board)
            # Force Q-Network to select different starting positions if it plays first
            action = int(
                game.active_player().select_cell(state, epsilon=0.0)
            ) if np.count_nonzero(game.board) > 0 or not isinstance(
                game.active_player(), players.QPlayer) else random.randint(
                0, 8)
            game.play(action)
            if not game.game_status()['game_over']:
                game.next_player()
        print('-------------\nGAME OVER!')
        game.print_board()
        print(game.game_status())
        print('-------------')
#train()
def add_humans(self, count=1000):
    """Create and return a list of freshly constructed Human players.

    :param count: number of players to create; defaults to 1000, the
        previously hard-coded amount, so existing callers are unchanged.
    :return: list of players.Human instances.
    """
    # Each Human receives the same positional arguments the original used.
    return [players.Human(1, 1, 1, 1, 0) for _ in range(count)]
def main():
    """Play one game between two human players on a fixed starting board
    and print the recorded history."""
    initial_piles = [1, 7, 5]  # starting board configuration
    game = Game(initial_piles)
    game.play(players.Human("Yu"), players.Human("Long"))
    print(game.history)