def __init__(self, game='chess', player1=None, player2=None, history_enabled=False):
    """Set up a chess or checkers game.

    Args:
        game: 'chess' or 'checkers'.
        player1, player2: player objects; default to fresh HumanPlayer
            instances. (Previously the defaults were ``HumanPlayer()``
            evaluated at definition time, so every game sharing the
            defaults shared the SAME two player objects — the classic
            mutable-default bug.)
        history_enabled: when True, moves are recorded in a GameHistory.

    Raises:
        ValueError: if ``game`` is not a supported game type (previously an
            unknown type silently left ``_game_state`` unset, producing a
            confusing AttributeError later).
    """
    self._game = game
    # Fresh player instances per call instead of shared defaults.
    player1 = HumanPlayer() if player1 is None else player1
    player2 = HumanPlayer() if player2 is None else player2
    # create board, set up, and initialize game state
    if self._game == 'checkers':
        board = Board(int(BOARD_SIZE), CheckerFactory())
        board.set_up()
        self._game_state = CheckersGameState(board, WHITE, None)
    elif self._game == 'chess':
        board = Board(int(BOARD_SIZE), ChessFactory())
        board.set_up()
        self._game_state = ChessGameState(board, WHITE, None)
    else:
        raise ValueError(f"unsupported game type: {game!r}")
    # set up players
    self._players = {WHITE: player1, BLACK: player2}
    player1.side = WHITE
    player2.side = BLACK
    # set up history
    if history_enabled:
        self._history = GameHistory()
    self.history_enabled = history_enabled
def __init__(self, game_type="CHESS", player1=None, player2=None, size=8, history_enabled=False):
    """Set up a game of the given type on a ``size`` x ``size`` board.

    Args:
        game_type: compared against the ``CHECKERS`` constant; anything
            else is treated as chess. NOTE(review): the default is the
            string literal "CHESS" while the comparison uses the
            ``CHECKERS`` constant — confirm the default actually matches
            the ``CHESS`` constant's value.
        player1, player2: player objects; default to fresh HumanPlayer
            instances (previously shared mutable defaults created once at
            definition time).
        size: board side length.
        history_enabled: when True, moves are recorded in a GameHistory.
    """
    player1 = HumanPlayer() if player1 is None else player1
    player2 = HumanPlayer() if player2 is None else player2
    # create board, set up, and initialize game state
    # (the original branched on game_type twice; one branch suffices)
    if game_type == CHECKERS:
        board = Board(int(size), CheckerFactory())
        board.set_up()
        self._game_state = CheckersGameState(board, WHITE, None)
    else:
        board = Board(int(size), ChessFactory())
        board.set_up()
        self._game_state = ChessGameState(board, WHITE, None)
    # set up players
    self._players = {WHITE: player1, BLACK: player2}
    player1.side = WHITE
    player2.side = BLACK
    # set up history
    if history_enabled:
        self._history = GameHistory()
    self.history_enabled = history_enabled
def __init__(self, game_type="chess", player1=None, player2=None, size=8, history_enabled=False):
    """Create a chess or checkers game.

    Args:
        game_type: "chess" or "checkers".
        player1, player2: player objects; default to fresh HumanPlayer
            instances (previously shared mutable defaults).
        size: board side length.
        history_enabled: when True, moves are recorded in a GameHistory.

    Raises:
        ValueError: if ``game_type`` is not recognized. (ValueError is more
            precise than the bare Exception raised before, and is still
            caught by any ``except Exception`` caller.)
    """
    if game_type not in ("checkers", "chess"):
        raise ValueError(f"Invalid argument {game_type}")
    player1 = HumanPlayer() if player1 is None else player1
    player2 = HumanPlayer() if player2 is None else player2
    # create board, set up, and initialize game state
    state_type = ChessGameState if game_type == "chess" else CheckersGameState
    factory = ChessPieceFactory if game_type == "chess" else CheckerFactory
    board = Board(int(size), factory())
    board.set_up()
    self._game_state = state_type(board, WHITE, None)
    # set up players
    self._players = {WHITE: player1, BLACK: player2}
    player1.side = WHITE
    player2.side = BLACK
    # set up history
    if history_enabled:
        self._history = GameHistory()
    self.history_enabled = history_enabled
def main(args):
    """Command-line entry point for the tic-tac-toe game (Python 2 code).

    args: argv-style list of strings, parsed with argparse.

    Only the human-vs-human and human-vs-computer game types are
    implemented; the learner modes fall through to NotImplementedError.
    """
    parser = ap.ArgumentParser(
        description=
        "A Game of Tic Tac Toe that gradually learns how to play over time")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        default=False,
                        help="Increase verbosity")
    parser.add_argument("--gametype",
                        default=PLAYER_VS_PLAYER,
                        choices=[
                            PLAYER_VS_PLAYER, PLAYER_VS_COMPUTER,
                            PLAYER_VS_LEARNER, LEARNER_VS_COMPUTER,
                            LEARNER_VS_LEARNER
                        ])
    args = parser.parse_args(args)
    if args.gametype in [PLAYER_VS_PLAYER, PLAYER_VS_COMPUTER]:
        game = TTT(args.verbose)
        p1 = HumanPlayer(PLAYER_1)
        if args.gametype == PLAYER_VS_PLAYER:
            p2 = HumanPlayer(PLAYER_2)
        else:
            # Computer opponent for player-vs-computer mode.
            p2 = Player(PLAYER_2)
        curr_player = p1
        # Main game loop: alternate moves until someone wins or the board fills.
        while True:
            print "\nPlayer " + ("1" if curr_player.id == PLAYER_1 else "2") + ", your move: "
            move = curr_player.move(game.board)
            game.move(curr_player.id, move)
            print game.draw_board()
            check = game.check_win()
            if check == PLAYER_1:
                print "Player 1 wins!"
                break
            elif check == PLAYER_2:
                print "Player 2 wins!"
                break
            elif check == CAT_GAME:
                print "It's a tie!"
                break
            # Swap turns.
            curr_player = p1 if curr_player == p2 else p2
    else:
        # Learner game types are declared in --gametype but not implemented.
        raise NotImplementedError()
def __init__(self):
    """Build the Connect-4 GUI, ask the user for a colour and an opponent
    type, and initialize the game state.

    Fixes: the original pre-created a throwaway ComputerPlayer before even
    asking whether the opponent should be a computer, and duplicated the
    whole opponent-selection logic once per colour branch.
    """
    self.mw = tkinter.Tk()  # root window
    # PhotoImage instances must be kept on self, otherwise Tk discards
    # them and the images disappear from the buttons.
    self.default_image = tkinter.PhotoImage(file="empty2.gif")    # empty slot
    self.default_image_r = tkinter.PhotoImage(file="Red2.gif")    # red disc
    self.default_image_y = tkinter.PhotoImage(file="Yellow2.gif") # yellow disc
    self.size = 7
    # Button grid used to drive both the game mechanics and the GUI.
    self.buttons_2d_list = []
    for i in range(self.size):
        self.row = [' '] * self.size  # attribute kept for compatibility
        self.buttons_2d_list.append(self.row)
    self.gboard = GameBoard(6)  # gameboard reference
    print("\tYELLOW or RED")
    colour = input("Please Select Your Colour(y/r): ").lower()
    # The human plays the chosen colour; the opponent gets the other one.
    own, other = ("Y", "R") if colour == "y" else ("R", "Y")
    p1 = HumanPlayer(own)
    opnt = input("\t Do you want to play against a computer(y/n)? ")
    if opnt == "y":
        p2 = ComputerPlayer(other, self.buttons_2d_list)
    else:
        p2 = HumanPlayer(other)
    self.players_lst = (p1, p2)     # the two players, in turn order
    self.currnt_player_index = 0    # index into players_lst
    self.winner = False             # becomes truthy when the game ends
def __init__(self):
    """Build the large-board variant of the Connect-4 GUI.

    Bug fix: when the human chose red, the computer opponent was created
    with colour "R" as well (both players red); it must play "Y".
    """
    self.mw = tkinter.Tk()
    self._board = []
    # Two image sets: the standard "*2" images and smaller "*3" versions
    # that fit the window of the larger board. All are kept on self so
    # tkinter does not discard them.
    self.default_image = tkinter.PhotoImage(file="empty2.gif")
    self.default_image_2 = tkinter.PhotoImage(file="empty3.gif")
    self.default_image_r = tkinter.PhotoImage(file="Red2.gif")
    self.default_image_r_2 = tkinter.PhotoImage(file="Red3.gif")
    self.default_image_y_2 = tkinter.PhotoImage(file="Yellow3.gif")
    self.default_image_y = tkinter.PhotoImage(file="Yellow2.gif")
    self.size = 7 + 2  # two extra columns for the large board
    self.buttons_2d_list = []
    for i in range(self.size):
        self.row = [' '] * self.size
        self.buttons_2d_list.append(self.row)
    self.gboard = Large_gameboard(6 + 2)  # the largest gameboard
    print("\tYELLOW or RED")
    colour = input("Please Select Your Colour(y/r): ").lower()
    # Human gets the chosen colour; the computer/second human the other.
    own, other = ("Y", "R") if colour == "y" else ("R", "Y")
    p1 = HumanPlayer(own)
    opnt = input("\t Do you want to play against a computer(y/n)? ")
    if opnt == "y":
        p2 = ComputerPlayer(other, self.buttons_2d_list)
    else:
        p2 = HumanPlayer(other)
    self.players_lst = (p1, p2)
    self.currnt_player_index = 0
    self.winner = False
def human_vs_human_mode():
    """Start a game in which two human players face each other.

    Parameters:
        None

    Returns:
        string - The value returned by play().
    """
    return play(TicTacToe(), HumanPlayer("X"), HumanPlayer("O"))
def run():
    """Interactive entry point: pick the two players, count down, then play."""
    sep = "X------------------------------------------------------------------------------------------------X"
    print("WELCOME TO TIC-TAC-TOE!")
    print(sep)
    print("You can choose player 1 or player 2. They can be Human(H), Random Computer Moves (R) or AI (AI). Player 1 uses the X token "
          "and player 2 uses O. Both players are Human by default.")
    print("The Human Player is controlled by you. The AI is unbeatable and chooses its own moves. Random chooses any move on the board"
          " at random.")
    print(sep)
    p1 = input("Choose Player 1 (H/AI/R): ").strip().lower()
    p2 = input("Choose Player 2 (H/AI/R): ").strip().lower()
    # Unrecognized answers fall back to a human player, as advertised above.
    kinds = {"ai": AIPlayer, "h": HumanPlayer, "r": RandomPlayer}
    x_player = kinds.get(p1, HumanPlayer)("X")
    o_player = kinds.get(p2, HumanPlayer)("O")
    print(sep + "\n")
    print(f"Player 1 (X) is {p1}")
    print(f"Player 2 (O) is {p2}")
    # Short countdown before the board appears; Ctrl-C skips it.
    for remaining in range(5, 0, -1):
        try:
            sys.stdout.write(f"\rStarting in {remaining}...")
            sys.stdout.flush()
            time.sleep(1)
        except KeyboardInterrupt:
            break
    print("\n")
    game = TicTacToe()
    os.system("clear")
    play(game, x_player, o_player, print_game=True)
def _kwargs_to_players(**kwargs):
    """Convert a game type string (e.g., 'computer-human') to actual players."""
    result = {}
    for seat, token in enumerate(kwargs['game_type'].split('-'), start=1):
        if token == 'human':
            result[seat] = HumanPlayer(_id=seat)
            continue
        # Computer player: the first seat (and any seat when no second
        # difficulty was given) uses 'difficulty'; otherwise 'difficulty2'
        # applies to the second computer player.
        if seat == 1 or not kwargs.get('difficulty2'):
            level = kwargs['difficulty']
        else:
            level = kwargs['difficulty2']
        result[seat] = ComputerPlayer(
            _id=seat,
            itermax=DIFFICULTY_TO_ITERMAX_MAP[level],
            is_omniscient=(level == 'insane'),
        )
    return result
def play_rlbot(fname='model_upload/strong_vs_RandomAction', dql='', version='SarsaAgent', pre_witch=0):
    """Let a human play against a loaded RL bot; return the (possibly
    updated) dql object produced by load_rl_bot."""
    human = HumanPlayer('You')
    bot, dql = load_rl_bot(fname, dql, version, pre_witch)
    match = Game.setup([human, bot], variable_cards, False)
    match.run()
    return dql
def __init__(self):
    """Initializes the game and starts it."""
    print('Welcome. Please, choose game mode: ')
    mode = self._get_mode()
    player1 = HumanPlayer()
    # A tuple mode means "vs computer" with a difficulty in mode[1];
    # anything else means a second human.
    if isinstance(mode, tuple):
        player2 = ComputerBinary() if mode[1] == 'easy' else ComputerAdvanced()
    else:
        player2 = HumanPlayer()
    self._board = Board()
    print('Field index representation:\n{}'.format(
        self._board.represent()))
    self._players = {1: player1, 2: player2}
    self.current_player = None
    self._start()
def start(self):
    """Ask for the first player's mark and opponent type, create both
    players, then reset the board and draw the initial screen."""
    marker = input("Choose mark for first player: ")
    vs_ai = input("Do you want play with ai? (y/n): ")
    if marker == "X":
        player_1 = HumanPlayer("X", "Player 1")
        if vs_ai == "y":
            player_2 = ArtificialPlayer("O", self)
        else:
            player_2 = HumanPlayer("O", "Player 2")
    elif vs_ai == "y":
        # Non-"X" marker + AI: the AI takes X and the human takes O.
        player_1 = ArtificialPlayer("X", self)
        player_2 = HumanPlayer("O", "Human player")
    else:
        player_1 = HumanPlayer("O", "Player 1")
        player_2 = HumanPlayer("X", "Player 2")
    self.add_player(player_1)
    self.add_player(player_2)
    self._board.clear()
    self._graphics.init()
    self._graphics.draw()
def __init__(self):
    """Set up the GUI, ask for difficulty and colour, and create the two
    players for a human-vs-computer game."""
    self.mw = tkinter.Tk()
    # Image instances live on self so tkinter keeps them alive.
    self.default_image = tkinter.PhotoImage(file="empty2.gif")
    self.default_image_r = tkinter.PhotoImage(file="Red2.gif")
    self.default_image_y = tkinter.PhotoImage(file="Yellow2.gif")
    self.size = 7
    self.buttons_2d_list = []
    for i in range(self.size):
        self.row = [' '] * self.size
        self.buttons_2d_list.append(self.row)
    self.gboard = GameBoard(6)
    # Let the user pick one of three difficulty levels.
    d = input(
        "\nSelect Difficulty: Easy(e) - Medium (default)(m) - Advanced (a) > "
    ).lower()
    print("\tYELLOW or RED")
    # The opponent is always the computer here, so only the colour is asked.
    colour = input("Please Select Your Colour(y/r) : ").lower()
    if colour == "y":
        p1, p2 = HumanPlayer("Y"), ComputerPlayer("R", self.buttons_2d_list)
    else:
        p1, p2 = HumanPlayer("R"), ComputerPlayer("Y", self.buttons_2d_list)
    self.difficulty = d
    self.players_lst = (p1, p2)
    self.currnt_player_index = 0
    self.winner = False
def play_with_ai(depth: int = 6) -> None:
    """Run a visualized Connect4 game: AIPlayerComplex (red) vs HumanPlayer (yellow).

    depth is how many moves ahead the AI searches when choosing its move.
    A higher depth yields a stronger, harder-to-beat AI, but move
    computation slows down quickly; the default of 6 is recommended.
    depth 7 is playable too, though the AI may need ~40s per move,
    especially early in the game.
    """
    ai_red = AIPlayerComplex(depth=depth)
    human_yellow = HumanPlayer()
    run_game_visualized(ai_red, human_yellow)
class TestHumanPlayer(unittest.TestCase):
    """Unit tests for HumanPlayer (Python 2 code — uses print statements)."""

    def setUp(self):
        #self.blackjack_game = BlackjackGame()
        # Stand-alone player: no game reference, 100 coins.
        self.human_player = HumanPlayer(None, 100)
        #self.human_player = self.blackjack_game.player

    def test_matrix_values(self):
        # NOTE(review): only prints the matrix; the assertion is a
        # placeholder and does not verify anything.
        print self.human_player.fg_values_matrix
        self.assertEqual(1, 1)

    def test_obtaining_maximum_from_vector(self):
        # The method should return the largest key of the dict.
        vector = {4: 'stand', 5: 'continue', 8: 'double bet', 1: 'split'}
        # NOTE(review): the local name shadows the builtin max().
        max = self.human_player.calculate_maximum_from_vector(vector)
        self.assertEqual(8, max)
def get_players(name_1=None, name_2=None):
    """
    Create the two players as Player instances.

    Players are either HumanPlayer or ComputerPlayer depending on their
    name: entering "cpu" selects a computer player.

    The original duplicated the prompt/construction logic for each player;
    it is factored into a local helper.
    """
    def _make_player(index, name, mark):
        # Prompt only when no name was supplied by the caller.
        if name is None:
            name = input(
                "Please enter the name of player {}. For a computer player, enter cpu: "
                .format(index + 1))
        cls = ComputerPlayer if name == "cpu" else HumanPlayer
        return cls(index, name, mark)

    return [_make_player(0, name_1, "X"), _make_player(1, name_2, "O")]
def _play_game() -> None:
    """ Play a round of tic tac toe """
    board = [[None] * 3 for _ in range(3)]
    # Ask (and re-ask until valid) whether the AI moves first.
    answer = input("Enter if AI is playing first (Y/N): ").upper()
    while answer not in ("Y", "N"):
        print("Invalid Input! Try again")
        answer = input("Enter if AI is playing first (Y/N): ").upper()
    if answer == "Y":
        players = (TicTacToeAI("X"), HumanPlayer("O"))
    else:
        players = (HumanPlayer("X"), TicTacToeAI("O"))
    # Alternate turns until the board reaches a terminal state.
    turn = 0
    while not game_over(board):
        current = players[turn % 2]
        action = current.play_move(board)
        if action is not None:
            row, col = action
            board[row][col] = current.get_player_piece()
        turn += 1
    print("===== GAME OVER =====")
    champ = winner(board)
    if players[0].get_player_piece() == champ:
        print(players[0].get_child_class_name() + " wins!")
    elif players[1].get_player_piece() == champ:
        print(players[1].get_child_class_name() + " wins!")
    else:
        print("DRAW! Nobody wins")
def human_vs_computer_mode(difficulty):
    """
    To start the game in which a human plays against the computer.

    Paramters:
        difficulty(int) - 0 if the level of difficulty should be easy,
            and 1 if the level should be hard.

    Returns:
        string - The value returned by the play() method.
    """
    game = TicTacToe()
    x_player = HumanPlayer("X")
    # Easy mode uses the default computer; anything else gets the smart one.
    if difficulty == 0:
        o_player = ComputerPlayer("O")
    else:
        o_player = ComputerPlayer("O", smart=True)
    return play(game, x_player, o_player)
def set_players(self, line_info, is_human=True):
    """Parse a pipe-delimited player declaration line and register players.

    line_info format: "<count>|<name1>|<name2>|..." — the first field is
    the declared number of players, the rest are their names.
    is_human selects HumanPlayer vs ComputerPlayer instances.

    Raises Exception when: a name field is empty, more players are
    declared than there are colours left, or the declared count does not
    match the number of names provided.

    NOTE(review): ``self.COLOR_COUNTER += 1`` on what looks like a class
    attribute creates/updates an instance attribute shadowing it — confirm
    whether the counter is meant to be shared across instances.
    """
    player_type = 'human' if is_human else 'computer'
    info_items = line_info.split('|')
    # First item is number of players, remaining items are player names
    num_players = int(info_items[0].strip())
    i = 1
    try:
        while i < num_players + 1:
            if info_items[i]:
                player_name = info_items[i].strip()
                if is_human:
                    self.players.append(HumanPlayer(player_name))
                else:
                    self.players.append(ComputerPlayer(player_name))
                # Track player color for visualization
                # (runs out when more players than COLORS are declared)
                if self.COLOR_COUNTER >= len(self.COLORS):
                    raise Exception(
                        'too many {} players have been declared'.format(
                            player_type))
                self.player_colors[player_name] = self.COLORS[
                    self.COLOR_COUNTER]
                self.COLOR_COUNTER += 1
                i += 1
            else:
                raise Exception(
                    'a {} player with no name has been declared'.format(
                        player_type))
    except IndexError:
        # Number of players indicated > number of names given
        raise Exception(
            '{} {} players were declared but only {} names were provided'.
            format(
                num_players,
                player_type,
                i - 1,
            ))
    if i < len(info_items):
        # Number of players indicated < number of names given
        raise Exception(
            'only {} {} players were declared but {} names were provided'.
            format(
                num_players,
                player_type,
                len(info_items) - 1,
            ))
def __init__(self, game_number):
    """Load the map for this game and build one player object per map
    player, with the player class chosen by the --player argument."""
    self.game_number = game_number
    logging.debug("Initializing map for game %d", game_number)
    cli = Args.instance().args()
    self.map: Map = Map(cli.n_turns)
    self.map.load_from_file(cli.map)
    self.player: Dict[str, RandomPlayer] = dict()
    # Player-type name -> implementing class.
    kinds = {
        "random": RandomPlayer,
        "human": HumanPlayer,
        "qlearn": QlearnPlayer,
    }
    for player_name in self.map.players.keys():
        logging.debug("Initializing player %s for game %s as %s",
                      player_name, game_number, cli.player)
        cls = kinds.get(cli.player)
        if cls is not None:
            self.player[player_name] = cls(player_name)
def game():
    """Run the console game loop: the IA player takes turns until no move
    is possible, printing board, score and turn count each round."""
    board = Board()
    human_player = HumanPlayer(board)
    ia_player = IAPlayer(board)
    player = ia_player
    turn_number = 0
    while True:
        try:
            print("Board:")
            print(board.board)
            print("Score: {}".format(player.score))
            print("Turn: {}".format(turn_number))
            player.turn()
            board.add_number_in_board()
            turn_number += 1
        except NoMovesPossibleException:
            # Board is stuck: report the final state and stop.
            print("Sorry, you lose")
            print("Nb of turn : {}".format(turn_number))
            print("Final Score : {}".format(player.score))
            break
class BlackjackGame:
    """Blackjack engine: one (learning) human player against a dealer.

    Python 2 code (print statements). NOTE(review): a few string literals
    in this class were garbled in the source dump and have been
    reconstructed — marked below.
    """

    # Class-level placeholders; real values are assigned in __init__.
    deck_of_cards = None
    dealer = None
    player = None
    active = None #This indicates if the game is active or not
    current_player_bet = None

    def __init__(self):
        self.deck_of_cards = DeckOfCards()
        self.dealer = Dealer(self)
        self.player = HumanPlayer(self, 10000) #The human player starts with 10000 coins

    def start_game(self, training_repetitions, real_games_repetitions):
        """Run a training phase followed by a fixed number of real hands."""
        # Reset scores and bankroll for a fresh session.
        self.player.victories = 0
        self.dealer.victories = 0
        self.player.coins = 10000
        self.active = True # As the game begins, we set this flag the True value
        training_flag = True #It's time to train!
        training_repetitions = training_repetitions #this number can be changed
        # Training phase: hands only update the learner's values.
        for x in range(0, training_repetitions):
            print 'Training hand #' + str(x) + '\n'
            self.begin_hand(training_flag)
            self.deck_of_cards.restart_deck_of_cards()
        training_flag = False #I'm tired of training, I want to play seriously!!
        print 'END OF TRAINING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
        self.player.print_victories()
        self.dealer.print_victories()
        #On the while condition we could ask if the player wants to keep playing,
        #But here we prefer the automated player to play a fixed set of hands, let's say, 250
        real_hands_to_play = real_games_repetitions
        i = 0
        while (self.not_ended() and i < real_hands_to_play):
            i+=1
            print '--------------------------------------------------'
            print '\n\nReal hand #' + str(i) + '\n'
            self.begin_hand(training_flag)
            self.deck_of_cards.restart_deck_of_cards()
        self.player.print_victories()
        self.dealer.print_victories()
        print 'Initial coins: ' + '10000' #it starts with 10000 coins
        print 'Coins (after the game): ' + str(self.player.coins)

    def not_ended(self):
        # True while the session is still active.
        return self.active

    def clean_hands(self):
        self.player.clean_hand()
        self.dealer.clean_hand()

    def get_deck(self):
        return self.deck_of_cards

    def begin_hand(self, training_flag):
        """Play one hand: take the bet, deal, let player then dealer move."""
        self.current_player_bet = self.player.bet(training_flag)
        self.clean_hands() #Makes sure both the dealer and the player have empty hands
        print '\nNEW ROUND:'
        # NOTE(review): label reconstructed from a garbled line break, by
        # analogy with the 'Human Hand:' label below.
        print '\n Dealer Hand:'
        self.dealer.get_card(self.deck_of_cards.give_a_card())
        self.dealer.print_hand()
        print '\n Human Hand:'
        self.player.get_card(self.deck_of_cards.give_a_card())
        self.player.get_card(self.deck_of_cards.give_a_card())
        self.player.print_hand()
        dealer_original_value = self.dealer.calculate_value()
        move = self.player.make_move(dealer_original_value, training_flag)
        if (move == 'split'):
            self.split_hand(training_flag)
        else:
            player_value = self.player.calculate_value()
            self.dealer.make_move(player_value)
            self.compute_and_print_hand_results(self.current_player_bet, player_value, training_flag)

    #This should be refactored
    def compute_and_print_hand_results(self, bet, player_value, training_flag):
        """Decide the hand outcome; update learner values, victories, coins.

        NOTE(review): `result` is assigned in several branches but never
        used, and only the tie branch returns a value — looks like
        unfinished refactoring. Also verify: the 21 branch pays without
        checking whether the dealer also has 21, and the player-busts
        branch still calls get_prize(bet) on a loss.
        """
        if player_value == 21:
            print 'BlackJack! You win'
            print '-------------------------------------------------'
            if training_flag and (len(self.player.temp_state_action) > 0):
                self.player.update_fg_values('win')
            elif not training_flag:
                self.player.compute_victory()
                self.player.get_prize(1.5 * 2 * self.current_player_bet)
            result = 'win'
        elif (self.player.calculate_value() == self.dealer.calculate_value()):
            if not training_flag:
                self.player.get_prize(self.current_player_bet)
            print "It's a tie! Your bet is refunded"
            return 'tie'
        elif player_value > 21:
            print ' \nThe Dealer WINS! (Human got over 21)'
            print '-------------------------------------------------'
            if training_flag:
                self.player.update_fg_values('lose')
            else:
                self.dealer.compute_victory()
                self.player.get_prize(self.current_player_bet)
            result = 'lose'
            self.player.restart_temp_state_action()
        elif self.dealer.calculate_value() > 21:
            print '\nHuman Player WINS! (Dealer got over 21)'
            print '-------------------------------------------------'
            if training_flag:
                self.player.update_fg_values('win')
            else:
                self.player.compute_victory()
                self.player.get_prize(2 * bet)
            result = 'win'
            self.player.restart_temp_state_action()
        elif (21 - player_value) < (21 - self.dealer.calculate_value()):
            # NOTE(review): string reconstructed from a garbled line break.
            print "\nHuman Player WINS! (Has a better score)"
            print '-------------------------------------------------'
            if training_flag:
                self.player.update_fg_values('win')
            else:
                self.player.compute_victory()
                self.player.get_prize(2 * bet)
            result = 'win'
            self.player.restart_temp_state_action()
        elif (21 - player_value) > (21 - self.dealer.calculate_value()):
            print "\nThe Dealer WINS! (Has a better score)"
            print '-------------------------------------------------'
            if training_flag:
                self.player.update_fg_values('lose')
            else:
                self.dealer.compute_victory()
            result = 'lose'
            self.player.restart_temp_state_action()

    def split_hand(self, training_flag):
        #If the player chooses to split, then two 'sub-hands' are played
        #instead of one. Each hand with one of the cards, and each hand
        #with the same bet. Obviously, if the player chooses to split, he
        #must bet again the same quantity.
        player_initial_hand = copy.deepcopy(self.player.hand)
        dealer_hand = self.dealer.hand
        card = self.player.hand.cards.pop()
        print 'SPLIT!\n'
        print '----Split hand 1\n'
        self.begin_one_split_hand(training_flag, self.player.hand, dealer_hand)
        player_value_a = self.player.calculate_value()
        aux_temp_state_action_a = copy.deepcopy(self.player.temp_state_action)
        print aux_temp_state_action_a
        print '----Split hand 2\n'
        self.player.restart_temp_state_action()
        self.player.temp_state_action.append(((player_initial_hand.calculate_status(), dealer_hand.calculate_value()), 'split'))
        self.player.hand.clean()
        self.player.hand.add_card(card)
        self.begin_one_split_hand(training_flag, self.player.hand, dealer_hand)
        self.dealer.make_move(0) #0 because it play with 2 hands at the same time
        player_value_b = self.player.calculate_value() #hand b
        # NOTE(review): "Hand 2" is printed with player_value_a and
        # "Hand 1" with player_value_b — confirm the crossed pairing is
        # intentional.
        print "Hand 2:"
        self.compute_and_print_hand_results(self.current_player_bet, player_value_a, training_flag) #hand a
        print "Hand 1"
        self.player.temp_state_action = aux_temp_state_action_a
        self.compute_and_print_hand_results(self.current_player_bet, player_value_b, training_flag)

    def begin_one_split_hand(self, training_flag, player_hand, dealer_hand):
        """Let the player play out one of the two split hands."""
        dealer_original_value = dealer_hand.calculate_value()
        self.player.hand = player_hand
        self.player.make_move(dealer_original_value, training_flag)
def __init__(self):
    """Create the deck, the dealer and the human player for a new game."""
    self.deck_of_cards = DeckOfCards()
    self.dealer = Dealer(self)
    # The human player starts with 10000 coins.
    self.player = HumanPlayer(self, 10000)
glEnable(GL_DEPTH_TEST) glEnable(GL_NORMALIZE) glEnable(GL_LIGHTING) #create the game world object gameWorld = GameWorld() #add ball to game world ball = Ball(gameWorld) gameWorld.addObject(ball) #add bounds to game world gameWorld.addObject(objects.BoundingBox(gameWorld, 20, 20)) gameWorld.addObject(objects.Field()) #add players to game world player = HumanPlayer(gameWorld, (-5.0, 1.1, 5.0)) player.camera = gameWorld.camera gameWorld.addObject(player) gameWorld.addObject(AiPlayer(gameWorld, ball.body, (-5.0, 1.1, -5.0))) gameWorld.addObject(AiPlayer(gameWorld, ball.body, (5.0, 1.1, -5.0))) gameWorld.addObject(AiPlayer(gameWorld, ball.body, (5.0, 1.1, 5.0))) glClearColor(0.5, 0.7, 0.9, 1.0) #create game menu object gameMenu = GameMenu() #add the background objects to menu gameMenu.addObject(objects.Field()) gameMenu.addObject(objects.BoundingBox(gameMenu, 10.0, 10)) crazyCubes = [objects.Cube(gameMenu, (-5.0, 1.001, -5.0), 200, (2.0, 2.0, 2.0), (1,1,0)), objects.Cube(gameMenu, (-5.0, 3.002, -5.0), 200, (2.0, 2.0, 2.0), (0,0,1)),
input_depth=3, num_layers=6, num_filters=[64, 64, 128, 128, 256, 256], dropout_rate=0.0, ) value_network.load('models/VALUE_NETWORK/VALUE_NETWORK') value_player = ValuePlayer(value_network) mcts_player = MCTSPlayer(num_simulations=1000) alpha_player = AlphaPlayer(policy_network, value_network, num_simulations=1000) player1 = alpha_player player2 = HumanPlayer() simulator = OthelloSimulator(player1, player2) num_games = 1 win_count = 0 black_scores = [] start_time = int(round(time.time() * 1000)) print('Running {0} Game Simulations...'.format(num_games)) for i in range(num_games): scores, play_by_play = simulator.simulate_game(record=False, verbose=True) print(scores) black_scores.append(scores[OthelloConstants.BLACK]) if scores[OthelloConstants.BLACK] > scores[OthelloConstants.WHITE]: win_count += 1
curr_move = (-1, -1) try: curr_move = self.__active_player__.move( self, legal_player_moves, time_left) except Exception as e: print(e) pass # check time limit remaining if time_left() <= 0: print("%s ran out of time. %s wins." % (str( self.__active_player__), str(self.__inactive_player__))) return self.__inactive_player__, self, "%s ran out of time" % str( self.__active_player__) # check if move is considered a legal move if not curr_move in legal_player_moves: print("Illegal move at %d,%d." % curr_move) print("Player %s wins." % str(self.__player_symbols__[self.__inactive_player__])) return self.__inactive_player__, self, "%s had no valid move" % str( self.__active_player__) self.__apply_move__(curr_move) # initilizer for starting game if __name__ == '__main__': print("Starting game:") from players import RandomPlayer from players import HumanPlayer board = Board(RandomPlayer(), HumanPlayer()) board.play_isolation()
from games import TicTacToe
from brain import Brain
from players import PolicyGradientPlayer, HumanPlayer, RandomPlayer
from brain.activation_functions import ReLU, Softmax

# Console-controlled opponent.
human = HumanPlayer()

# Policy-gradient training hyperparameters.
DISCOUNT_FACTOR = 0.6
REWARD_FACTOR = 2
EXPERIENCE_BATCH_SIZE = 512
BATCH_ITERATIONS = 128
EXPERIENCE_BUFFER_SIZE = 2 ** 15
LEARNING_RATE = 0.0005
REGULARIZATION = 0.1

# Network layout: (layer_size, activation) per layer. 18 inputs
# (presumably 9 board cells x 2 one-hot planes — TODO confirm), one
# hidden ReLU layer, 9-way Softmax over the possible moves.
BRAIN_TOPOLOGY = (
    (18, None),
    (512, ReLU),
    (9, Softmax),
)

robot_brain = Brain(BRAIN_TOPOLOGY, learning_rate=LEARNING_RATE, regularization=REGULARIZATION)
robot = PolicyGradientPlayer(
    robot_brain,
    discount_factor=DISCOUNT_FACTOR,
    reward_factor=REWARD_FACTOR,
    batch_iterations=BATCH_ITERATIONS,
    experience_batch_size=EXPERIENCE_BATCH_SIZE,
    experience_buffer_size=EXPERIENCE_BUFFER_SIZE,
)
def bot_vs_human_loop():
    """Run the dice-game loop: naive bot (player 1) vs human (player 2).

    The game ends when one side wins or when ten consecutive turns pass
    with neither side able to move (deadlock).

    The original duplicated the entire turn logic for each player; it is
    factored into the local _take_turn helper (messages stay exactly as
    before, including the original wording).
    """
    board = Board()
    p1 = OtherNaivePlayer(0, board)
    p2 = HumanPlayer(1, board)
    lock = 0   # consecutive turns in which a player could not move
    turn = 0

    def _take_turn(player, win_msg, blocked_msg, idle_msg):
        # Play one turn (repeating on Moves.REPLAY); return True on a win.
        nonlocal lock
        won = False
        replay = True
        while replay:
            replay = False
            dices = roll_dices()
            print('Here is a', dices, '!')
            if dices > 0:
                pawn = player.play(dices)
                if pawn != -1:
                    lock = 0
                    res = board.play(player.me, pawn, dices)
                    if res == Moves.WIN:
                        print(win_msg)
                        won = True
                    elif res == Moves.REPLAY:
                        replay = True
                    board.draw_board()
                else:
                    lock += 1
                    print(blocked_msg)
            else:
                print(idle_msg)
        return won

    board.draw_board()
    end = False
    while not end:
        turn += 1
        print('\n========== Turn', turn)
        print('\n########## Player 1 turn')
        end = _take_turn(p1, 'Player 1 Won', 'Player 1 cannot play.',
                         'Player 1 do nothing.')
        if not end:
            print('\n########## Player 2 turn')
            end = _take_turn(p2, 'Player 2 Won', 'You cannot do anything.',
                             'You do nothin.')
        if lock >= 10:
            end = True
            print('This was a deadlock !')
def human_game(p=None):
    """Run a game between the given bot and a human player.

    Args:
        p: the bot to play against; when omitted (or falsy, e.g. the
            empty list the old signature used as its default), a fresh
            SmithyBot is created. The original signature was
            ``def human_game(p=[])`` — a mutable default argument.

    Returns:
        Whatever Game.run() returns.
    """
    if not p:
        p = SmithyBot()
    ready_player_one = HumanPlayer('You')
    game = Game.setup([p, ready_player_one], variable_cards, False)
    return game.run()
def _set_players(self):
    """Create the two human players and store them as a tuple."""
    first = HumanPlayer("First", FIRST_PLAYER)
    second = HumanPlayer("Second", SECOND_PLAYER)
    self._players = (first, second)
def game_loop(x_player, o_player, reward):
    """Run one full game; optionally reward both players, then reset them.

    Returns the finished GameLogic instance (its .winner holds the result).
    """
    game = GameLogic()
    # Turn order alternates between (player, move-function) pairs.
    turn_order = cycle([(x_player, game.putX), (o_player, game.putO)])
    current, place_mark = next(turn_order)
    while not game.has_ended:
        # Advance to the next player only when a move was actually made.
        if current.make_move(game, place_mark):
            current, place_mark = next(turn_order)
    if reward:
        x_player.reward(REWARDS_X[game.winner])
        o_player.reward(REWARDS_O[game.winner])
    x_player.reset()
    o_player.reset()
    return game


def learn(x_player, o_player):
    """Play a training game (rewards applied)."""
    return game_loop(x_player, o_player, reward=True)


def play(x_player, o_player):
    """Play an evaluation game (no rewards)."""
    return game_loop(x_player, o_player, reward=False)


if __name__ == '__main__':
    from players import RandomPlayer, HumanPlayer
    game = play(RandomPlayer(), HumanPlayer())
    print('Winner:', game.winner)