def runGame(p1, p2):
    """Play one complete game between two players.

    Both players are reset, dealt an opening hand of 5 cards, and then
    alternate rounds until the game-over condition is met.

    Returns the winning player's name.
    """
    p1.resetForNewGame()
    p2.resetForNewGame()
    dealer = Dealer(5)
    # NOTE(review): each dealCards() call appears to produce a fresh deal;
    # both players take element [0] of their own deal -- confirm intended.
    p1.cards = dealer.dealCards()[0]
    p2.cards = dealer.dealCards()[0]
    gameState = GameState(p1, p2)
    while True:
        p1.pickCard()
        p2.pickCard()

        roundWinner = gameState.judgeRound(p1, p2)

        # The round winner banks the suit (element 1) of the card they played.
        if roundWinner == p1:
            p1.accumulatedCards[p1.playedCard[1]] += 1
        if roundWinner == p2:
            p2.accumulatedCards[p2.playedCard[1]] += 1

        # Evaluate the end condition once per round instead of twice.
        gameWinner = gameState.judgeGameOver(p1, p2)
        if gameWinner == p1:
            return p1.name
        if gameWinner == p2:
            return p2.name

        # Refill both hands with one random card each for the next round.
        p1.cards.append(dealer.generateRandomCard())
        p2.cards.append(dealer.generateRandomCard())
Пример #2
0
def main():
    """Entry point: set up the pygame window and run the Tetris main loop."""
    # Initialize pygame.
    pygame.init()
    # Create the screen object at the configured resolution.
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    # Set the window caption ("Tetris" in Chinese).
    pygame.display.set_caption("俄罗斯方块")
    # Enable key repeat so held keys feel continuous.
    pygame.key.set_repeat(10, 100)
    # Background color (RGB).
    bg_color = BACKGROUND_COLOR
    random.seed(int(time.time()))
    game_state = GameState(screen)
    game_resource = GameResource()
    # Main game loop.
    while True:
        # When the falling piece lands: merge it into the wall, score any
        # completed lines, and spawn a new random piece.
        if game_state.piece and game_state.piece.is_on_bottom:
            game_state.wall.add_to_wall(game_state.piece)
            game_state.add_score(game_state.wall.eliminate_lines())
            game_state.piece = Piece(random.choice(PIECE_TYPES), screen,
                                     game_state.wall)

        check_events(game_state)
        # Clear the frame with the background color.
        screen.fill(bg_color)
        # Draw the play area, grid lines, and wall.
        GameDisplay.draw_game_area(screen, game_state, game_resource)

        if game_state.piece:
            game_state.piece.paint()
        # Flush the finished frame to the display.
        pygame.display.flip()
Пример #3
0
def runGame(searchAlgo, initialGameState):
    """Build a GameState from *initialGameState*, hand it to a SearchAgent
    running *searchAlgo*, and write the search result to the output file."""
    state = GameState()
    state.initializeGame(initialGameState)
    printPuzzle(state)  # visualize the starting position

    agent = SearchAgent(searchAlgo)
    agent.registerInitialState(state)
    agent.generateOutFile()
Пример #4
0
    def __init__(self):
        """Initialize the game window, font, and the initial game objects."""
        self.screen = pygame.display.set_mode((1000, 1000))

        pygame.display.set_caption("Asteroids")
        pygame.font.init()
        self.myfont = pygame.font.SysFont('Comic Sans MS', 30)
        self.debug_text_surface = self.myfont.render('Hello', False,
                                                     (255, 255, 255))

        # Target frame duration in seconds (60 FPS).
        self.render_pace: float = 1 / 60
        self.game_active = True
        asteroid_list = []
        bullet_list = []

        ship = Ship(self.screen, Point(400, 500), Point(0, 0), -math.pi / 2, 1)
        enemy_ship = Ship(self.screen, Point(600, 500), Point(0, 0),
                          -math.pi / 2, 1)
        self.game_state = GameState(ship, enemy_ship, bullet_list,
                                    asteroid_list)

        # Spawn 8 asteroids at random positions, velocities, and sizes.
        # (Original used the unidiomatic `list.append(asteroid_list, x)`.)
        for _ in range(8):
            asteroid_list.append(
                Asteroid(self.screen,
                         Point(randint(0, 900), randint(0, 900)),
                         Point(randint(-20, 20), randint(-20, 20)),
                         randint(120, 170)))

        self.movement_manager = MovementManager(self.render_pace, 1000, 1000)
        self.collision_manager = CollisionManager()
Пример #5
0
 def turn(self):
     """Run one combat turn: the active fighter attacks the enemy's
     active fighter with the move chosen by the controller."""
     attacker = self.activeFighter
     options = attacker.getAttackOptions()
     choice = self.controller.decide(options)
     effects = attacker.getAttackEffects(choice)
     target = GameState.getEnemyActive(self.name)
     target.receiveEffects(effects)
Пример #6
0
    def calculate_movement(self, game_state: GameState) -> GameState:
        """Advance every object in *game_state* by one frame and return it.

        Ships use the dedicated ship-movement model; asteroids and bullets
        move linearly by velocity * renderPace and wrap at screen edges.
        """
        # Calculate ship movements.
        # (The original used bare string literals here, which are no-op
        # expression statements, not comments.)
        game_state.my_ship = self.calculate_ship_movement(game_state.my_ship)
        game_state.enemy_ship = self.calculate_ship_movement(
            game_state.enemy_ship)

        # Asteroids and bullets share the same move-then-wrap rule.
        for obj in game_state.asteroids:
            obj.pos += obj.pos_delta * self.renderPace
            obj.pos = self.calculate_wrap(obj.pos)
        for obj in game_state.bullets:
            obj.pos += obj.pos_delta * self.renderPace
            obj.pos = self.calculate_wrap(obj.pos)

        return game_state
Пример #7
0
def _check_winner(state: GameState) -> bool:
    """Return True if any row, column, or diagonal is uniformly filled.

    Getters are invoked lazily so evaluation short-circuits at the first
    winning line, matching the original `or` chain.
    """
    line_getters = (
        state.get_first_column,
        state.get_first_row,
        state.get_second_column,
        state.get_second_row,
        state.get_third_column,
        state.get_third_row,
        state.get_top_left_diagonal,
        state.get_top_right_diagonal,
    )
    return any(_all_same(get_line()) for get_line in line_getters)
Пример #8
0
    def run(self,
            num_plays=PLAYS_PER_EVAL,
            search_iters=EVAL_PLAY_SEARCH_ITERS,
            markov_exp=EVAL_PLAY_MARKOV_EXP):
        '''Play evaluation games between the alpha and apprentice agents.

        The evaluation is synchronized with self-play/evaluation processes
        via UpdateSignal and AsyncSignal in the main script
        (asyn_training.py): the UpdateSignal triggers self-play updates once
        apprentice wins exceed the declared win ratio, while the AsyncSignal
        triggers the evaluation processes themselves.

        Parameters
        ----------
        num_plays : int (positive), optional
            number of evaluation games to play. Default PLAYS_PER_EVAL.
        search_iters : int (positive), optional
            number of MCTS search iterations per move.
            Default EVAL_PLAY_SEARCH_ITERS.
        markov_exp : float, optional
            exponent controlling the number of Markov-chain steps used to
            generate evaluation games.

        Returns
        -------
        apprentice_wins : int (nonnegative)
            number of games won by the apprentice.
        '''
        # Inference only: put both models in eval mode.
        self.alpha_agent.model.eval()
        self.apprentice_agent.model.eval()

        ALPHA, APPRENTICE = 0, 1
        actions = np.arange(MAX_NODES)
        games = GameState.state_generator(markov_exp)
        apprentice_wins = 0

        for _ in range(num_plays):
            # Uniformly pick which agent moves first.
            to_move = np.random.choice([ALPHA, APPRENTICE])
            # Play one randomly generated game of upset-downset.
            game_state = next(games)
            while not game_state.is_terminal_state():
                root = PUCTNode(game_state)
                if to_move == ALPHA:
                    policy = self.alpha_agent.MCTS(root, search_iters, 0)
                else:
                    policy = self.apprentice_agent.MCTS(root, search_iters, 0)
                move = np.random.choice(actions, p=policy)
                game_state = root.edges[move].state
                to_move = 1 - to_move
            # The player who could not move loses; the previous mover wins.
            if 1 - to_move == APPRENTICE:
                apprentice_wins += 1

        return apprentice_wins
Пример #9
0
def check_winner(state: GameState) -> "int | None":
    """Return the game result: the winning player, TIE, or NO_PLAYER.

    Relies on the module-level ``winner`` global.
    """
    global winner
    if _check_winner(state):
        # presumably ``winner`` is set elsewhere when the winning line is
        # made -- TODO confirm against the caller.
        return winner
    elif len(state.get_possible_moves()) == 0:
        # Board full with no winning line: record and report a tie.
        winner = TIE
        return TIE
    return NO_PLAYER
Пример #10
0
class Controller:
    """Mediates between the board view, mouse input, and the game state."""

    def __init__(self):
        self.board = Board()
        self.game_state = GameState()
        self.selected_square = [0, 0]  # [col, row] of the selected square
        self.possible_moves = []       # legal destinations for the selection
        self.white_turn = True

    def select_square(self, square):
        """Select *square* and cache the legal moves for the piece on it."""
        self.selected_square = square
        self.game_state.select_piece(self.selected_square)
        self.possible_moves = self.game_state.get_moves(self.white_turn)

    def handle_mouseclick(self):
        """On left click: move the selected piece if legal, then reselect."""
        mouse_state = pygame.mouse.get_pressed()
        if mouse_state[0]:  # left button pressed
            mouse_pos = pygame.mouse.get_pos()
            pressed_square = self.board.get_square(mouse_pos)
            if self.is_possible_move(pressed_square):
                self.game_state.move_selected_piece(pressed_square)
                self.white_turn = not self.white_turn
            self.select_square(pressed_square)

    def is_possible_move(self, square):
        """Return True if *square* is among the cached legal moves."""
        if self.possible_moves is None:
            return False
        return any(square[0] == move[0] and square[1] == move[1]
                   for move in self.possible_moves)

    def draw(self, surface):
        """Draw the board (selection and move hints) and then the pieces."""
        self.board.draw(surface, self.selected_square, self.possible_moves)
        self.game_state.draw_pieces(surface)
Пример #11
0
class Game:
    """Top-level application: owns the window, the states, and the loop."""

    def __init__(self):
        pygame.display.set_caption("My Game")
        self.FPS = 60
        self.WIDTH = 900
        self.HEIGHT = 500
        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        self.BLACK = (0, 0, 0)
        self.WHITE = (255, 255, 255)
        self.dest = (100, 100)
        # The handler must exist before any object that takes it in its
        # constructor.
        self.handler = Handler(self)
        self.assets = Assets(self.handler)

        self.gameState = GameState(self.handler)
        self.gameState.init()
        self.menuState = MenuState(self.handler)
        self.menuState.init()
        self.currentState = self.menuState

    def draw_window(self):
        """Clear the window, let the active state draw itself, then flip."""
        self.WIN.fill(self.BLACK)
        self.currentState.draw()
        pygame.display.flip()

    def tick(self):
        """Advance the active state by one frame."""
        self.currentState.tick()

    def run(self):
        """Main loop: cap FPS, poll input, update state, render."""
        clock = pygame.time.Clock()
        while True:
            clock.tick(self.FPS)
            self.handler.inputManager.tick()
            self.tick()
            self.draw_window()
Пример #12
0
    def __init__(self, verbose, agentType, nHands, startingMoney, nTraining):
        """
        Initialize the game! Create dealer and player objects and an initial gameState
        input: verbose
            whether or not to print each step as agents play
        input: agentType
            string representing the type of agent to instantiate
        input: nHands
            number of hands to play at max if agent never busts
        input: startingMoney
            the amount of money the agent gets to start with
        input: nTraining
            the number of training hands to do for a qlearning player
        returns: nothing
        """
        self.verbose = verbose
        self.agentType = agentType
        # Total hands = evaluation hands + training hands.
        self.nHands = int(nHands) + int(nTraining)
        self.nStartingHands = int(nHands) + int(nTraining)
        print("{} test {} train {} total".format(nHands, nTraining,
                                                 self.nHands))
        self.startingMoney = startingMoney
        self.nTraining = int(nTraining)
        self.dealer = Dealer()
        self.player = self.createAgent(self.agentType, self.startingMoney,
                                       nTraining)

        self.agents = [self.player, self.dealer]

        # Clean slate
        dealerHand = Hand()
        playerHand = Hand()
        deck = Deck()

        # list because player can split
        playerHands = [playerHand]
        # NOTE(review): if createAgent returns a falsy player, self.gameState
        # is never assigned -- later accesses would raise AttributeError;
        # confirm that case cannot occur.
        if self.player:
            initialBets = [self.player.getBetAmt()]
            # Create initial game state
            self.gameState = GameState(verbose, self.dealer, dealerHand,
                                       self.player, playerHands, deck,
                                       initialBets)

        # True when running the q-learning agent (enables train/test split).
        self.q = self.agentType == 'qlearning'
Пример #13
0
    def __init__(self):
        """Create the window, core services, and the initial (menu) state."""
        pygame.display.set_caption("My Game")
        self.FPS = 60
        self.WIDTH, self.HEIGHT = 900, 500
        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        self.BLACK, self.WHITE = (0, 0, 0), (255, 255, 255)
        self.dest = (100, 100)
        # The handler must be created before anything that receives it.
        self.handler = Handler(self)
        self.assets = Assets(self.handler)

        self.gameState = GameState(self.handler)
        self.gameState.init()
        self.menuState = MenuState(self.handler)
        self.menuState.init()
        # Start on the menu.
        self.currentState = self.menuState
Пример #14
0
    def execute_one_game(self, pac_individual, ghost_individual):
        """Play one full game between a Pac individual and a Ghost
        individual, then assign their fitnesses (with parsimony pressure)
        and scores in place."""
        # Fresh random map and a new game state for this evaluation.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # Fresh controllers driven by the two individuals.
        for pac_id in range(self.experiment.num_pacs):
            self.pac_controllers[pac_id] = PacController(
                pac_id, pac_individual)
        for ghost_id in range(self.experiment.num_ghosts):
            self.ghost_controllers[ghost_id] = GhostController(
                ghost_id, ghost_individual)

        # Play turns until the game reports it is over.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        # Pac fitness: raw score minus a parsimony penalty on the tree's
        # size or height (whichever the population is configured for).
        pac_individual.fitness = game_state.score
        pac_attr = ('size' if self.pac_pop.parsimony_technique == 'size'
                    else 'height')
        pac_individual.fitness -= (self.pac_pop.pppc *
                                   getattr(pac_individual.root, pac_attr))

        # Ghost fitness: negated score, plus a time bonus when the ghosts
        # win, minus the same style of parsimony penalty.
        ghost_individual.fitness = -(game_state.score)
        if game_state.ghost_won:
            ghost_individual.fitness += int(
                (game_state.time * 100.0) / game_state.orig_time)
        ghost_attr = ('size' if self.ghost_pop.parsimony_technique == 'size'
                      else 'height')
        ghost_individual.fitness -= (self.ghost_pop.pppc *
                                     getattr(ghost_individual.root,
                                             ghost_attr))

        # Score is the raw game score (no parsimony) for Pac; for Ghost,
        # score and fitness are interchangeable.
        pac_individual.score = game_state.score
        ghost_individual.score = ghost_individual.fitness
Пример #15
0
 def __init__(self, boardDim):
     """Set up screen offsets, the gem-image cache, and the game state for
     a board of dimensions *boardDim* (a Point, e.g. Point(8, 8))."""
     self.gameOffset = Point()
     self.boardOffset = Point()
     self.gemImgs = {}

     self.gameState = GameState(boardDim)

     # Locate the board on screen before loading the UI button images.
     self.calibrate()

     self.submitImg = bitmap.Bitmap.open(SUBMIT_IMAGE_PATH)
     self.replayImg = bitmap.Bitmap.open(REPLAY_IMAGE_PATH)
Пример #16
0
class Game:
    """Application shell for AREA CONQUEST: window, states, main loop."""

    def __init__(self):
        pygame.display.set_caption("AREA CONQUEST")
        self.FPS = 60
        self.WIDTH = 900
        self.HEIGHT = 500
        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        self.BLACK = (0, 0, 0)
        self.WHITE = (255, 255, 255)
        self.dest = (100, 100)
        # Handler is created first: every other component receives it.
        self.handler = Handler(self)

        self.assets = Assets(self.handler)
        self.menuState = MenuState(self.handler)
        self.menuState.init()
        self.gameState = GameState(self.handler)
        self.gameState.init()
        # Start on the menu.
        self.currentState = self.menuState

    def draw_window(self):
        """Paint one frame: clear, draw the active state, flip."""
        self.WIN.fill(self.BLACK)
        self.currentState.draw()
        pygame.display.flip()

    def tick(self):
        """Delegate the per-frame update to the active state."""
        self.currentState.tick()

    def run(self):
        """Run the game loop capped at self.FPS frames per second."""
        clock = pygame.time.Clock()
        while True:
            clock.tick(self.FPS)
            self.handler.inputManager.tick()
            self.tick()
            self.draw_window()
Пример #17
0
    def newGame(self,
                layout,
                pacmanType,
                ghostType,
                display,
                agentOpts,
                quiet=False,
                catchExceptions=False):
        """Build a Game for *layout*.

        One agent is instantiated per agent state in the initial GameState:
        pacmanType for pacman states, ghostType for ghost states.
        """
        initState = GameState()
        initState.initialize(layout)

        agents = [
            (pacmanType if agentState.isPacman else ghostType)(
                agentOpts, index, agentState)
            for index, agentState in enumerate(initState.data.agentStates)
        ]

        game = Game(agents, display, self, catchExceptions=catchExceptions)
        game.state = initState
        initState.game = game

        # Keep a pristine copy of the starting position.
        self.initialState = initState.deepCopy()
        self.quiet = quiet
        return game
    def execute_one_game(self, weights):
        """Play one game with a hard-coded Pac controller computing the
        weighted sum weights[0]*G + weights[1]*P + weights[2]*W +
        weights[3]*F.

        Return the final game score.
        """
        # Fresh random map and a new game state.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # Build one weight*sensor product term per sensor, then sum them
        # pairwise: ((w0*G + w1*P) + (w2*W + w3*F)).
        terms = [
            Node(expr='*',
                 left=Node('constant', constant=weight),
                 right=Node(sensor))
            for weight, sensor in zip(weights, ('G', 'P', 'W', 'F'))
        ]
        left_sum = Node(expr='+', left=terms[0], right=terms[1])
        right_sum = Node(expr='+', left=terms[2], right=terms[3])
        root_node = Node(expr='+', left=left_sum, right=right_sum)
        self.pac_controllers[0] = PacController(0, ExprTree(root_node))

        # Play turns until the game ends.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        return game_state.score
Пример #19
0
    def testExtration(self):
        """Win-distance features must track each side's accumulated cards."""
        featureExt = FeatureExtractor()
        agent = Player("aql agent")
        enemy = Player("greedy agent")
        gameState = GameState(agent, enemy)

        # Enemy banks one Water and one Fire: one step from a win.
        enemy.accumulatedCards["Water"] += 1
        enemy.accumulatedCards["Fire"] += 1
        features = featureExt.getFeatures(gameState, "action", agent.name)
        self.assertEqual(features["enemy-distance-to-closest-win"], 1)
        self.assertEqual(features["agent-distance-to-closest-win"], 4)

        # Give the agent a Water card; shift the enemy from Fire to Water.
        agent.cards.append((1, "Water"))
        enemy.accumulatedCards["Fire"] -= 1
        enemy.accumulatedCards["Water"] += 1
        features = featureExt.getFeatures(gameState, "action", agent.name)
        self.assertEqual(features["agent-distance-to-closest-win"], 3)
        self.assertEqual(features["enemy-distance-to-closest-win"], 1)
Пример #20
0
    def __init__(self):
        """Build the main window: game state, communicator, maps, buttons,
        sounds, and a 1-second refresh timer."""
        QMainWindow.__init__(self)
        self._logger = StateLogger()
        self._game_state = GameState(self._logger, self.hit_sound)
        self._kom = Komunikator(self._game_state)
        self._rx_thread = None

        # Load the UI before touching any widgets defined in it.
        uic.loadUi('untitled.ui', self)
        self.setWindowTitle("Statki")

        # Two board widgets: the enemy's map and our own map.
        self.mapaWroga = mapaGry(self._game_state,
                                 False,
                                 self.refresh_selection)
        self.mapaNasza = mapaGry(self._game_state,
                                 True,
                                 self.refresh_selection)
        self.mapEnemy.addWidget(self.mapaWroga)
        self.mapOur.addWidget(self.mapaNasza)

        # Associate buttons
        self.b_connect.clicked.connect(self.client_init)
        self.b_listen.clicked.connect(self.server_init)
        self.b_lose.clicked.connect(self.instant_lose)
        self.b_save_log.clicked.connect(self.log_popup)
        self.b_load_log.clicked.connect(self.help_popup)
        self.b_send.clicked.connect(self.send_message)
        self.b_strzel.clicked.connect(self.shoot)
        self.b_ustaw.clicked.connect(self.add_statek)
        self.b_next.clicked.connect(self.get_next_state)
        self.b_prev.clicked.connect(self.get_prev_state)

        # Sound effects.
        self.s_splash  = QtGui.QSound("res/splash.wav")
        self.s_explode = QtGui.QSound("res/bombexpl.wav")
        self.s_lose    = QtGui.QSound("res/loser.wav")

        # Refresh the view once per second.
        self.startTimer(1000)
        self.state_view_update()
Пример #21
0
    def predict_next_move(self, game, current_player, search_iters):
        '''Return the (deterministic) best move the agent predicts for
        'current_player' in 'game'. Intended for real-time play against the
        agent (or agent vs. agent), e.g. via the play() method of the
        UpDown class.

        Parameters
        ----------
        game : UpDown
            a game of upset-downset.
        current_player : int or str
            the player to move: the integers 0/1 (UP/DOWN in config.py) or
            the strings 'up'/'down'.
        search_iters : int (nonnegative)
            the number of search iterations to perform during MCTS.

        Returns
        -------
        int
            the best action as given by the deterministic MCTS policy
            (temp=0).
        '''
        # Inference only.
        self.model.eval()
        # Accept 'up'/'down' strings as well as the UP/DOWN constants.
        if isinstance(current_player, str):
            current_player = {'up': UP, 'down': DOWN}[current_player.casefold()]
        # Deterministic MCTS policy from this position.
        root = PUCTNode(GameState(game, current_player))
        policy = self.MCTS(root, search_iters, temp=0)
        return np.argmax(policy)
Пример #22
0
    def execute_one_game(self, pac_expr_tree):
        """Play one game with *pac_expr_tree* driving Pac.

        Return (fitness, score) where fitness is the raw game score minus
        the parsimony penalty on the tree's size or height.
        """
        # Fresh random map and a new game state.
        game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]
        self.experiment.world_data = []
        game_state = GameState(game_map, self.experiment.pill_density,
                               self.experiment.time_multiplier,
                               self.experiment.fruit_spawning_probability,
                               self.experiment.fruit_score,
                               self.experiment.num_pacs,
                               self.experiment.num_ghosts)
        game_state.write_world_config(self.experiment.world_data)
        game_state.write_world_time_score(self.experiment.world_data)

        # New Pac controller built from the supplied expression tree.
        self.pac_controllers[0] = PacController(0, pac_expr_tree)

        # Play turns until the game ends.
        game_over = False
        while not game_over:
            game_over = game_state.play_turn(self.experiment.world_data,
                                             self.pac_controllers,
                                             self.ghost_controllers)

        # Parsimony pressure on the tree's size or height.
        penalty_attr = ('size' if self.parsimony_technique == 'size'
                        else 'height')
        fitness = (game_state.score -
                   self.pppc * getattr(pac_expr_tree.root, penalty_attr))

        return fitness, game_state.score
Пример #23
0
 def __init__(self):
     """Start a fresh game: new board and state, origin square selected,
     white to move."""
     self.board = Board()
     self.game_state = GameState()
     self.white_turn = True          # white moves first
     self.selected_square = [0, 0]   # [col, row] currently selected
     self.possible_moves = []        # legal moves for the selection
Пример #24
0
    def run(self,
            replay_buffer,
            update_signal,
            self_play_id,
            search_iters=SELF_PLAY_SEARCH_ITERS,
            markov_exp=SELF_PLAY_MARKOV_EXP,
            temp=TEMP,
            temp_thrshld=TEMP_THRSHLD):
        '''Starts indefinite self-play loop. The games for self-play are 
        generated via an ongoing Markov chain as described in randomDag.py.
        The self-play processes are synchronized with one another, train 
        and evaluation processes via the 'replay_buffer' and 'update_signal', 
        respectively. 'replay_buffer' stores the self-play data and triggers 
        the start of training while 'update_signal' triggers model parameter 
        updates.
        
        Parameters
        ----------
        replay_buffer : ReplayBuffer
            remote actor for managing self-play data between self-play processes 
            and the Train process. Also carries the signal to start training.
        update_signal : UpdateSignal
            remote actor for synchronization between self-play processes and 
            evaluation processes. Triggers model parameter updates.
        self_play_id : int (nonnegative)
            unique identifier for the self-play process.
        search_iters : int (positive), optional
             the number of search iterations to perform during MCTS. 
             The default is SELF_PLAY_SEARCH_ITERS.
        markov_exp : float, optional
            The exponent determining the number of steps taken in 
            the markov chain in generating games for self-play.
        temp : float (nonnegative)
            partially controls exploration. If 0, the policy is deterministic 
            and the position with highest visit count from MCTS is chosen.
        temp_thrshld : int (nonnegative), optional
            The number of moves after which the policy becomes deterministic.
            I.e., temp is set to 0. (See temp, above.) The default is 
            TEMP_THRSHLD.

        Returns
        -------
        None.

        '''
        # put agent in evaluation mode
        self.agent.model.eval()
        # the action space...
        actions = np.arange(MAX_NODES)
        # game state generator via an ongoing Markov chain
        state_generator = GameState.state_generator(markov_exp)
        # start indefinite self-play loop
        while True:
            # check whether the evaluator has published new alpha parameters
            if ray.get(update_signal.get_update.remote(self_play_id)):
                # get current update_id
                update_id = ray.get(update_signal.get_update_id.remote())
                # load current alpha parameters from the shared model file
                self.agent.load_parameters(
                    path=f'./model_data/alpha_{update_id}.pt')
                # reset the update signal so we don't reload next iteration
                update_signal.clear_update.remote(self_play_id)
            # get a game and play it to the end, recording states/policies
            initial_state = next(state_generator)
            root = PUCTNode(initial_state)
            states = []
            policies = []
            move_count = 0
            while not root.state.is_terminal_state():
                # exploratory temperature early in the game, greedy after
                # temp_thrshld moves
                t = temp if move_count < temp_thrshld else 0
                policy = self.agent.MCTS(root, search_iters, t)
                move = np.random.choice(actions, p=policy)
                states.append(root.state.encoded_state)
                policies.append(policy)
                # descend into the chosen child and make it the new root
                root = root.edges[move]
                root.to_root()
                move_count += 1
            # update state values as seen from current players perspective:
            # the player who made the last move won, so values alternate
            # -1/+1 ending with +1 at the final position's mover
            if move_count % 2 == 0:
                values = [(-1)**(i + 1) for i in range(move_count)]
            else:
                values = [(-1)**i for i in range(move_count)]
            # construct training data from self-play
            train_data = [
                (state, policy, value)
                for state, policy, value in zip(states, policies, values)
            ]
            # add training data to replay buffer
            replay_buffer.add.remote(train_data)
Пример #25
0
def menuLoop():
    """Run the main-menu loop (star field + fractal backdrop + HUD buttons).

    Returns a GameState value: GameState.Quit on quit/P-key, otherwise the
    GameState corresponding to the button the user activated.
    """
    import ui
    import otherEffects
    done = False
    hud = ui.HudScreen()
    hud.set_button_text(0, "Play")
    hud.set_button_text(1, "Editor")
    hud.set_button_text(2, "Tutorial")
    hud.set_button_text(3, "About")
    hud.set_button_text(4, "Exit")
    flag = None
    pygame.mouse.set_visible(True)
    pygame.event.set_grab(False)

    # Background effects: a chaos-game fractal centered on screen and a
    # scrolling star field.
    fractal = otherEffects.ChaosObject(
        (renderer.SCREEN_WIDTH // 2, (renderer.SCREEN_HEIGHT // 2) + 15), 225,
        3)
    s_field = otherEffects.StarField(renderer.SCREEN_SIZE)

    # Star field reacts to button-highlight changes.
    hud.onChangedButton.append(s_field.change_speed)
    while not done:
        # Frame delta in seconds.
        deltaTime = clock.get_time() / 1000

        events = pygame.event.get()
        # hud.update returns a non-None flag when a button is activated.
        flag = hud.update(deltaTime, events)
        for event in events:

            if event.type == pygame.QUIT:
                return GameState.Quit

            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    return GameState.Quit

        # While the Exit button is highlighted, ramp the star field speed
        # up to a cap of 100 for a visual cue.
        if hud.selected_button == GameState.Quit.value:
            s_field.speed += deltaTime * 75
            if s_field.speed > 100:
                s_field.speed = 100

        # Several fractal iterations per frame so it fills in quickly.
        for x in range(10):
            fractal.update()

        s_field.update(deltaTime)

        # region Buttons

        # Draw order: star field, fractal, titles, then the HUD on top.
        s_field.draw()
        renderer.SCREEN.blit(s_field, (0, 0))
        fractal.draw(renderer.SCREEN)

        textDraw.message_display_MT(renderer.SCREEN, "The dawn of Otrozhny",
                                    renderer.SCREEN_WIDTH // 2, 100, 30)
        textDraw.message_display_MT(renderer.SCREEN, "Containment breach",
                                    renderer.SCREEN_WIDTH // 2, 150, 30)

        # A button was activated this frame: map its flag to a GameState.
        if flag is not None:
            return GameState(flag)

        hud.draw()
        pygame.display.update()
        clock.tick()
Пример #26
0
            return response['data']['games_won'], response['data']['board']

    print("Error \'next\'\n" + str(response))
    sys.exit()

def get_hash(hashString):
    """Persist the challenge hash to '<email>_hash.txt' and exit the program.

    Relies on the module-level `email` global. Note this never returns:
    it terminates the process via sys.exit() once the file is written.
    """
    print("Saving hash to " + email + "_hash.txt")
    # `with` guarantees the file is flushed and closed before we exit;
    # the original left the handle open when sys.exit() fired.
    with open(email + "_hash.txt", "w") as f:
        f.write("Email: " + email +  "\nHash: " + hashString)
    sys.exit()

##------------------------------------------------------------------------------------------
## Play the game
    
if __name__ == '__main__':
    player = GameState()
    state = new_game()
    numMoves = 0
    numGames = 0
    startTime = time.time()

    ## Play games until hash received (challenge requires 50 games)
    while True:
        print('Game Number: {0}'.format(numGames))
        print('Time Elapsed: {0:3f}'.format(time.time() - startTime))

        ## Make moves until game is over (games requires you make 30 moves without losing.)
        while numMoves < 30:
    ##        print(str(state[0]) + str(state[1]) + str(state[2]))
    ##        print(str(state[3]) + str(state[4]) + str(state[5]))
    ##        print(str(state[6]) + str(state[7]) + str(state[8]))
Пример #27
0
from gameState import GameState


# Script entry point (Python 2 syntax: bare `print` statement).
if __name__ == '__main__':

    print 'Starting Game of War'

    # Build the initial state; GameState is imported at the top of the file.
    game = GameState()
Пример #28
0
class GameInterface:
    """Screen-scraping driver for a match-3 game (Python 2).

    Locates the game window on screen via template images, reads the board
    by sampling pixel colors, and plays moves by driving the mouse.
    All coordinates are relative to gameOffset, found by calibrate().
    """
    # Takes boardDim, a Point representing the board dimensions (8x8 board is represented by Point(8,8))
    def __init__(self, boardDim):
        self.gameOffset = Point()
        self.boardOffset = Point()
        
        # cache for gem template images (populated elsewhere, if at all)
        self.gemImgs = {}
        
        self.gameState = GameState(boardDim)
        
        self.calibrate()
        
        # templates used to detect the end-of-game submit/replay buttons
        self.submitImg = bitmap.Bitmap.open(SUBMIT_IMAGE_PATH)
        self.replayImg = bitmap.Bitmap.open(REPLAY_IMAGE_PATH)
    
    # Sets the location of the top left of the board - all other points are represented relative to the gameOffset
    def calibrate(self):
        """Find the calibration template on screen and derive both offsets.

        Raises (implicitly) if find_bitmap returns None -- assumes the game
        window is visible when called.
        """
        calibrationImg = bitmap.Bitmap.open(CALIBRATION_IMAGE_PATH)
        
        bmp = bitmap.capture_screen()
        (x, y) = bmp.find_bitmap(calibrationImg)
        
        self.gameOffset = Point(x, y) + GAME_OFFSET
        self.boardOffset = self.gameOffset + BOARD_OFFSET
    
    # Reads the board from the screen and returns a GameState
    def readGame(self):
        """Capture the screen and refresh gameState.board from pixel colors.

        If the submit/replay buttons are visible the round is over: click
        submit (if present), restart via replayGame(), and return the
        (stale) gameState instead of reading the board.
        """
        bmp = bitmap.capture_screen()
        
        submitPt = bmp.find_bitmap(self.submitImg)
        replayPt = bmp.find_bitmap(self.replayImg)
        
        # NOTE(review): `!= None` should idiomatically be `is not None`.
        if submitPt != None or replayPt != None:
            if submitPt != None:
                print 'submit found!'
                mouse.move(submitPt[0], submitPt[1])
                mouse.click()
            time.sleep(10)
            self.replayGame()
            return self.gameState
        
        for y in range(self.gameState.boardDim.y):
            for x in range(self.gameState.boardDim.x):
                gem = self.getGem(bmp, Point(x, y))
                self.gameState.board.board[y][x] = gem
                
        return self.gameState
    
    # Returns a Gem given a screenshot and board coordinates
    def getGem(self, bmp, point):
        """Identify the gem at board coordinate `point`.

        Averages the colors of a 10x10 pixel patch centered on the tile and
        maps the average to the nearest known gem color.
        """
        absPt = self.boardToAbsPt(point)
        
        halfSize = 5
        total = 0
        totalColor = RGB()
        
        for x in range(absPt.x - halfSize, absPt.x + halfSize):
            for y in range(absPt.y - halfSize, absPt.y + halfSize):
                hexColor = bmp.get_color(x, y)
                r, g, b = color.hex_to_rgb(hexColor)
                rgb = RGB(r, g, b)
                
                totalColor += rgb
                total += 1
                
        avgRGB = totalColor / total
        gemColor = self.RGBToGem(avgRGB)
        
        # 'status' is a placeholder status string for the Gem constructor.
        return Gem(gemColor, 'status', point)

    # Finds the gem color closest to the given RGB value
    def RGBToGem(self, RGB):
        # NOTE(review): the parameter `RGB` shadows the RGB class used in
        # getGem(); harmless here, but a rename would avoid confusion.
        minDistance = None;
        color = 'none'
        for key, value in COLOR_CONSTANTS.items():
            # First iteration relies on the `minDistance == None` arm.
            if RGB.distSquared(value) < minDistance or minDistance == None:
                color = key
                minDistance = RGB.distSquared(value)
        return color

    # Click and drag the mouse to make a move - takes a Move object
    # Attempts to place the cursor back where it found it
    def makeMove(self, move):
        # Mirror the move in the internal model first, then perform it
        # on screen with a click-drag between the two tiles.
        self.gameState.makeMove(move)
        
        firstPt, secondPt = move.pointTuple()
        
        absFirst = self.boardToAbsPt(firstPt)
        absSecond = self.boardToAbsPt(secondPt)
        
        lastX, lastY = mouse.get_pos()
        
        mouse.move(absFirst.x, absFirst.y)
        mouse.toggle(True)
        mouse.move(absSecond.x, absSecond.y)
        mouse.toggle(False)
        
        # restore the cursor so the user can keep working
        mouse.move(lastX, lastY)
    
    # Move mouse off the board
    def moveOffBoard(self):
        mouse.move(self.gameOffset.x - 10, self.gameOffset.y - 10)
    
    # Returns True if the mouse is in the exit box (10x10 pix block at the top left of the game)
    def isMouseAtExit(self):
        (x, y) = mouse.get_pos()
        
        #return x > self.gameOffset.x and x < self.gameOffset.x + GAME_SIZE.x and y > self.gameOffset.y and y < self.gameOffset.y + GAME_SIZE.y
        return x > self.gameOffset.x and x < self.gameOffset.x + 10 and y > self.gameOffset.y and y < self.gameOffset.y + 10

    # Converts board coordinates to absolute screen coordinates (the center of the tile)
    def boardToAbsPt(self, boardPt):
        absX = self.boardOffset.x + boardPt.x * PIECE_OFFSET.x + PIECE_OFFSET.x / 2
        absY = self.boardOffset.y + boardPt.y * PIECE_OFFSET.y + PIECE_OFFSET.y / 2
        
        return Point(absX, absY)
    
    def replayGame(self):
        """Poll for the replay button, click it, and click through the
        post-replay screens.  The sleeps are tuned to the game's animation
        timings -- adjust with care.
        """
        print 'replay!!!'
        replayPt = None
        
        time.sleep(5)
        
        # busy-wait until the replay button template appears on screen
        while replayPt == None:
            bmp = bitmap.capture_screen()
            replayPt = bmp.find_bitmap(self.replayImg)
        
        mouse.move(replayPt[0], replayPt[1])
        mouse.click()
        
        time.sleep(60)
        mouse.move(self.gameOffset.x + GAME_SIZE.x / 2, self.gameOffset.y + GAME_SIZE.y / 2)
        mouse.click()
        
        time.sleep(2)
        mouse.move(self.gameOffset.x + 100, self.gameOffset.y + 100)
        mouse.click()
Пример #29
0
    def approximate_outcome(self, game, search_iters):
        '''Approximate the outcome of game. For this to make sense the game
        must be a normal play short partisan combinatorial game (a combinatorial
        game which ends in a finite number of moves and the last player to
        move wins).

        The approximate outcome is found by having the agent self-play
        the game twice: once with each player starting. The results are
        combined via the outcome rules for normal play short partisan
        combinatorial games to determine an approximate outcome.

        (For more info on combinatorial games see:
        https://en.wikipedia.org/wiki/Combinatorial_game_theory)


        Parameters
        ----------
        game : UpDown
            a game of upset-downset.
        search_iters : int (positive),
            the number of search iterations to perform during each MCTS.

        Returns
        -------
        approx_out : str,
            'Next', if the Next player (first player to move) wins.
            'Previous', if the Previous player (second player to move) wins.
            'Up', if Up can force a win. (Playing first or second).
            'Down', if Down can force a win. (Playing first or second).

        '''
        # make sure the model is in evaluation mode.
        self.model.eval()
        # game state from each player's perspective
        up_start = GameState(game, UP)
        down_start = GameState(game, DOWN)
        # action space and store for the outcomes found.
        actions = np.arange(MAX_NODES)
        outcomes = []
        # self-play, once with each player moving first
        for game_state in [up_start, down_start]:
            # set root
            root = PUCTNode(game_state)
            move_count = 0
            # play until a terminal state is reached; temp=0 makes the
            # MCTS policy deterministic (greedy) apart from ties.
            while not root.state.is_terminal_state():
                policy = self.MCTS(root, search_iters, temp=0)
                move = np.random.choice(actions, p=policy)
                root = root.edges[move]
                root.to_root()
                move_count += 1

            # update outcome: 'P' for second player to move
            # and 'N' for first player to move
            out = 'P' if (move_count % 2 == 0) else 'N'
            outcomes.append(out)

        # combine the two self-play results via the normal-play outcome rules
        up_start_out, down_start_out = outcomes
        if up_start_out == 'P' and down_start_out == 'P':
            approx_out = 'Previous'
        elif up_start_out == 'N' and down_start_out == 'N':
            approx_out = 'Next'
        elif up_start_out == 'P' and down_start_out == 'N':
            approx_out = 'Down'
        elif up_start_out == 'N' and down_start_out == 'P':
            approx_out = 'Up'

        return approx_out
Пример #30
0
import pygame
from pygame import Color
from gameState import GameState
from bots.jack import Jack
from bots.sunny import Sunny

if __name__ == "__main__":
    pygame.init()
    clock = pygame.time.Clock()

    gameWidth = 20
    gameHeight = 15
    blockWidth = 40
    initPos = (gameWidth / blockWidth / 2, gameHeight / blockWidth / 2)

    pygame.display.set_caption("Armageddon Blitz")

    game = GameState(gameWidth, gameHeight, blockWidth)
    game.add_bot(Jack(69), Color('orange'))
    game.add_bot(Sunny(420), Color('red'))
    game.run_game_loop()
Пример #31
0
from gameState import GameState

## Basic Fuzzing Test
## We have the GameState play against itself.
## Since it is a FSM designed to never reach a terminal state, the games should go on indefinitely.
## GameState() will randomly select a path given multiple possible moves allowing for complete path coverage eventually.
# Self-play fuzzing: the FSM never reaches a terminal state, so each game
# simply runs for a fixed number of moves.
testPlayer = GameState()

# 100 games of 100 moves each.
for _ in range(100):
    board = list('---------')

    for _ in range(100):
        move = testPlayer.get_next_move(board, player=1)

        if len(move) == 1:
            # Placement: drop a piece on the chosen square (1-based index).
            board[move[0] - 1] = 'p'
        else:
            # Relocation: vacate the source square, occupy the destination.
            src, dst = move[0], move[1]
            board[src - 1] = '-'
            board[dst - 1] = 'p'
Пример #32
0
class MyWin(QMainWindow):
    """Main PyQt window for the 'Statki' (Battleships) game (Python 2 / PyQt4).

    Wires the widgets loaded from untitled.ui to the GameState model, the
    StateLogger (replay/undo), and the Komunikator network layer.  Owns the
    two board widgets: our fleet and the enemy's grid.
    """
    def __init__(self):
        QMainWindow.__init__(self)
        self._logger = StateLogger()
        # GameState calls hit_sound(hit) back to play audio feedback.
        self._game_state = GameState(self._logger, self.hit_sound)
        self._kom = Komunikator(self._game_state)
        self._rx_thread = None

        uic.loadUi('untitled.ui', self)
        self.setWindowTitle("Statki")

        # Enemy map (flag False) is where we shoot; our map (True) shows
        # our own ships.  Both report clicks via refresh_selection.
        self.mapaWroga = mapaGry(self._game_state,
                                 False,
                                 self.refresh_selection)
        self.mapaNasza = mapaGry(self._game_state,
                                 True,
                                 self.refresh_selection)
        self.mapEnemy.addWidget(self.mapaWroga)
        self.mapOur.addWidget(self.mapaNasza)

        # Associate buttons
        self.b_connect.clicked.connect(self.client_init)
        self.b_listen.clicked.connect(self.server_init)
        self.b_lose.clicked.connect(self.instant_lose)
        self.b_save_log.clicked.connect(self.log_popup)
        self.b_load_log.clicked.connect(self.help_popup)
        self.b_send.clicked.connect(self.send_message)
        self.b_strzel.clicked.connect(self.shoot)
        self.b_ustaw.clicked.connect(self.add_statek)
        self.b_next.clicked.connect(self.get_next_state)
        self.b_prev.clicked.connect(self.get_prev_state)

        #sounds
        self.s_splash  = QtGui.QSound("res/splash.wav")
        self.s_explode = QtGui.QSound("res/bombexpl.wav")
        self.s_lose    = QtGui.QSound("res/loser.wav")

        # Poll the model once per second (see timerEvent).
        self.startTimer(1000)
        self.state_view_update()

    def get_prev_state(self):
        # Step the view one logged state backwards (replay).
        self._game_state.set_logged_state(self._logger.get_prev_state())

    def get_next_state(self):
        # Step the view one logged state forwards (replay).
        self._game_state.set_logged_state(self._logger.get_next_state())

    def timerEvent(self, *args, **kwargs):
        # Repaint only when the model reports that something changed.
        if self._game_state.get_state_change():
            self.state_view_update()
            self.repaint()

    def onParseEvent(self, event):
        print event

    def shoot(self):
        """Fire at the (single) selected enemy cell, if any is selected."""
        sel_list = self.mapaWroga.get_selection()
        if sel_list:
            self.mapaWroga.clear_selection()
            sel = sel_list[0]
            self.repaint()
            self._kom.send_shoot(sel)
        else:
            print "Brak zaznaczenia!"

    def add_statek(self):
        """Place a new ship on our map; only legal during turn 0 (setup)."""
        assert self._game_state.get_turn_no() == 0
        try:
            self.mapaNasza.add_new_ship()
        except AssertionError:
            print "Shipyard: Not enough parts for that kind of ship"
        self.repaint()

    def state_view_update(self):
        """Refresh the turn indicator and both sides' ship counters."""
        if self._game_state.get_turn_no() == 0:
            self.l_turn_indicator.setText("Rozmieszczanie statkow")
        else:
            if self._game_state.is_my_turn():
                self.l_turn_indicator.setText("Nasza kolej")
            else:
                self.l_turn_indicator.setText("Ich kolej")

        self.l_turn_no.setText(" ".join(["Numer tury:",
                                         str(self._game_state.get_turn_no())]))
        # Remaining ship counts per size (1..4) for us and the enemy.
        self.l_my_ile_1.setText(str(self._game_state.get_ship_count(1)))
        self.l_my_ile_2.setText(str(self._game_state.get_ship_count(2)))
        self.l_my_ile_3.setText(str(self._game_state.get_ship_count(3)))
        self.l_my_ile_4.setText(str(self._game_state.get_ship_count(4)))
        self.l_oni_ile_1.setText(str(self._game_state.get_enemy_ship_count(1)))
        self.l_oni_ile_2.setText(str(self._game_state.get_enemy_ship_count(2)))
        self.l_oni_ile_3.setText(str(self._game_state.get_enemy_ship_count(3)))
        self.l_oni_ile_4.setText(str(self._game_state.get_enemy_ship_count(4)))

    def refresh_selection(self, selection):
        """Show the clicked cell's coordinates; TypeError means selection
        is not an (x, y) pair (selection currently disallowed)."""
        try:
            x, y = selection;
            self.l_wsp_x.setText(str.format("X:{}", x))
            self.l_wsp_y.setText(str.format("Y:{}", y))
            self.repaint()
        except TypeError:
            print "No new selections allowed."

    def send_message(self):
        # NOTE(review): self._komunikat is never assigned in this class, so
        # this always hits the AttributeError branch -- confirm intent.
        try:
            self._kom.send(self._komunikat)
        except AttributeError:
            print("Sender: not initialized")

    def log_popup(self):
        # Pick a file and save the state log to it.
        logFile = QtGui.QFileDialog.getOpenFileName()
        self._logger.save_logger(logFile)

    def help_popup(self):
        popup = HelpDialog()
        popup.exec_()

    def socket_cleanup(self):
        """Best-effort network shutdown: send quit and stop the sender."""
        try:
            self._kom.send("q")
            self._kom.sender_terminate()
        except AttributeError:
            print("Sender: not initialized")

    def closeEvent(self, event):
        # NOTE(review): replacing _rx_thread with a fresh Thread() just drops
        # the reference to the running receiver thread -- confirm this is the
        # intended shutdown strategy.
        self._rx_thread = Thread()
        self._kom.cleanup()
        event.accept()

    def client_init(self):
        """Connect to a server (currently hard-wired to localhost; the
        readiness check and address dialog are commented out)."""
        self.setWindowTitle("Statki - Klient")
        if 1: #self._game_state.ready_for_battle():
            # text, ok = QtGui.QInputDialog.getText(self, 'Wybor celu',
            #                                       'Podaj wody na ktore '
            #                                       'chcesz wyplynac (ip):')
            if 1: #ok:
                text = "localhost"

                self._rx_thread = Thread(target=self._kom.client_init, args=(text,))
                self._rx_thread.start()
        else:
            print "Generals: We're not ready to fight yet, commander!"

    def server_init(self):
        """Start listening for an incoming connection (readiness check
        is commented out, mirroring client_init)."""
        self.setWindowTitle("Statki - Serwer")
        if 1: #self._game_state.ready_for_battle():
            self._rx_thread = Thread(target=self._kom.server_init, args=())
            self._rx_thread.start()
        else:
            print "Generals: We're not ready to fight yet, commander!"



    def instant_lose(self):
        """Play the losing sound, tear down the sockets, and quit."""
        self.s_lose.play()
        # Blocking sleep so the sound finishes before the app exits.
        sleep(6)
        #        winsound.PlaySound("res/loser.wav",(winsound.SND_ALIAS))
        self.socket_cleanup()
        qApp.exit(0)

    def hit_sound(self,hit):
        # Audio feedback callback handed to GameState: explosion on hit,
        # splash on miss.
        if hit:
            self.s_explode.play()
        else:
            self.s_splash.play()
Пример #33
0
class Game():
    """
    Game class instantiates dealer and player and the initial game state. It plays the game by playing
    a sequence of hands until the player busts or until the nHands value is reached (nHands should be used
    when not using a user-agent so if the agent keeps winning the game doesn't go on forever)
    """
    def __init__(self, verbose, agentType, nHands, startingMoney, nTraining):
        """
        Initialize the game! Create dealer and player objects and an initial gameState
        input: verbose
            whether or not to print each step as agents play
        input: agentType
            string representing the type of agent to instantiate
        input: nHands
            number of hands to play at max if agent never busts
        input: startingMoney
            the amount of money the agent gets to start with
        input: nTraining
            the number of training hands to do for a qlearning player
        returns: nothing
        """
        self.verbose = verbose
        self.agentType = agentType
        self.nHands = int(nHands) + int(nTraining)
        self.nStartingHands = int(nHands) + int(nTraining)
        print("{} test {} train {} total".format(nHands, nTraining,
                                                 self.nHands))
        self.startingMoney = startingMoney
        self.nTraining = int(nTraining)
        self.dealer = Dealer()
        self.player = self.createAgent(self.agentType, self.startingMoney,
                                       nTraining)

        self.agents = [self.player, self.dealer]

        # Clean slate
        dealerHand = Hand()
        playerHand = Hand()
        deck = Deck()

        # list because player can split
        playerHands = [playerHand]
        if self.player:
            initialBets = [self.player.getBetAmt()]
            # Create initial game state
            self.gameState = GameState(verbose, self.dealer, dealerHand,
                                       self.player, playerHands, deck,
                                       initialBets)

        self.q = self.agentType == 'qlearning'

    def isValidGame(self):
        """ Make sure we created the player correctly """
        return self.player is not None

    def createAgent(self, agentType, startingMoney, nTraining):
        """ Create an agent of the right type
        input: string agentType
            type of agent to create
        input: int startingMoney
            how much money the agent starts off with

        returns: An instantiated agent with startingMoney, or None if agent not supported yet
        """
        if (agentType == 'user'):
            return UserPlayer(startingMoney)
        elif (agentType == 'optimal'):
            return OptimalPlayer(startingMoney)
        elif (agentType == 'expectimax'):
            return Expectimax(startingMoney)
        elif (agentType == 'q-learning' or agentType == 'qlearning'):
            return QLearning(startingMoney, nTraining)
        elif (agentType == 'random'):
            return Random(startingMoney)
        else:
            print("Can't create other agent types at this point\n")
            return None

    def reportPerformance(self, aggregateOutcomes, payout, totalBet, moneyLeft,
                          maxAmtHad, minAmtHad):
        """
        Take the values from the playGame loop and output a summary of player performance over the hands 
        Return the stats to the game so they can be passed to statEngine
        """
        nHandsPlayed = sum(aggregateOutcomes.values())
        aggregatePercentages = {
            k: float(v) / float(nHandsPlayed)
            for k, v in aggregateOutcomes.items()
        }

        totalWinnings = payout
        houseEdge = -totalWinnings / float(totalBet)

        print(
            "Counting all splits as two hands, there were {} hands played by the agent who started with ${}\n"
            .format(nHandsPlayed, self.startingMoney))
        print("Most money ever had: {}\t Least money ever had: {}\n".format(
            maxAmtHad, minAmtHad))
        print("Money remaining after all hands:  ${}\n".format(moneyLeft))
        print(
            "Total winnings {} on total bets of {} for a house edge of {:.1%}".
            format(totalWinnings, totalBet, houseEdge))
        for state, number in aggregateOutcomes.items():
            print("{} : {} ({:.1%})\n".format(state, number,
                                              aggregatePercentages[state]))

        stats = {
            'nHands': nHandsPlayed,
            'outcomes': aggregateOutcomes,
            'percentages': aggregatePercentages,
            'totalWinnings': payout,
            'totalBet': totalBet,
            'houseEdge': houseEdge,
        }

        return stats

    def playGame(self):
        """ Loop through playing hands until agent out of money or we reach self.nHands
        input: none
        returns: stat dictionary with summary of performance

        """
        if (self.verbose):
            print(
                "**** Welcome to CS182 Blackjack! ****\n\n\nNew game:\nYour starting money: {}\n"
                .format(self.startingMoney))

        # Performance bookkeeping
        aggregateOutcomes = {
            WinStates.WIN: 0,
            WinStates.PUSH: 0,
            WinStates.BLACKJACK: 0,
            WinStates.LOSE: 0,
        }
        aggregatePayout = 0
        aggregateBet = 0
        minVal = int(self.startingMoney)
        maxVal = int(self.startingMoney)
        # BUGFIX: curMoney must exist even if no hand is ever bookkept (e.g.
        # a Q-learner that runs out of hands during training) because
        # reportPerformance reads it when the game ends.
        curMoney = int(self.startingMoney)
        stats = None
        # Game loop
        while (True):
            # Progress indicator for long, non-verbose runs
            if self.nHands % 10000 == 0:
                print(self.nHands)

            # Play hand
            winStateList, payout, betAmount = self.playHand()

            # Performance tracking. Only track performance for Q-learner if it's out of training
            bookkeep = True
            if self.q:
                if ((self.nStartingHands - self.nHands) < (self.nTraining)):
                    bookkeep = False

            if bookkeep:
                # Bookkeeping on performance
                for winState in winStateList:
                    aggregateOutcomes[winState] += 1
                aggregatePayout += payout
                aggregateBet += betAmount

                curMoney = self.gameState.player.getMoney()
                if curMoney > maxVal:
                    maxVal = curMoney
                if curMoney < minVal:
                    minVal = curMoney

            # Reset hands
            self.gameState.resetHands()

            # if user player, ask if wants to play more
            if self.agentType == 'user':
                sleep(2)
                cont = input("Another hand? y/n ---> ")
                if cont == "n" or cont == "N" or cont == "no":
                    break

            # Out of money or game over
            if self.nHands == 0 or self.gameState.player.getMoney() <= 0:
                stats = self.reportPerformance(aggregateOutcomes,
                                               aggregatePayout, aggregateBet,
                                               curMoney, maxVal, minVal)

                # If qlearner, write the policy to disk
                if self.q:
                    diskIO = QDictIO(self.player.QValues)
                    diskIO.write()
                break

        return stats

    def playHand(self):
        """
        Play a hand! deal to the player and dealer, get players actions, change gameState, get dealer's actions,
        determine winner, etc

        input: none
        returns: Return winsState list for all hands, the payout across all hands, and the amount bet
        (multiple hands mentioned in case of split)
        """

        vPrint("\n\n*************** NEW HAND ***************\n\n",
               self.verbose)

        self.nHands -= 1

        # Place bet and deal
        self.gameState.initialDeal()

        vPrint(
            "New hand: Player bet: {}\tPlayer money: {}\n".format(
                self.player.getBetAmt(), self.player.getMoney()), self.verbose)
        vPrint("...Dealing...\n", self.verbose)

        # for storing last actions of each hand for qlearning updates
        lastActions = []
        lastNewStates = []
        lastPrevStates = []

        # Hand loop
        while not self.gameState.isTerminal():
            # Player turn
            if self.gameState.isPlayerTurn():
                vPrint("***** Player's turn *****\n\n", self.verbose)
                for idx, hand in enumerate(self.gameState.getPlayerHands()):
                    vPrint(
                        "Player hand {}: {}\n".format(idx, hand.strFromHand()),
                        self.verbose)

                vPrint(
                    "Player is currently playing hand {}\n".format(
                        self.gameState.getPlayerHandIdx()), self.verbose)
                vPrint(
                    "Dealer's shown card: {}\n".format(
                        self.gameState.dealerHand.strFromHand()), self.verbose)

                # Get action player takes in this state (will make sure its action for the hand they're playing)
                playerAction = self.player.getAction(self.gameState)

                vPrint("Player action is {}\n".format(playerAction),
                       self.verbose)

                # Take the action
                newGameState = self.gameState.generatePlayerSuccessor(
                    playerAction)

                # If Q learner player, update them or store their last action to update after the dealer plays
                if self.q:
                    # Still player turn, give a zero reward if playing same hand (didnt bust)
                    if newGameState.isPlayerTurn():
                        if newGameState.getPlayerHandIdx(
                        ) == self.gameState.getPlayerHandIdx():
                            reward = 0
                            newGameState.player.update(self.gameState,
                                                       playerAction,
                                                       newGameState, reward)
                        else:
                            # Playing another hand, store the last action of this hand to update with hand rewards after eval
                            lastActions.append(playerAction)
                            lastPrevStates.append(self.gameState)
                            lastNewStates.append(newGameState)
                    # Its dealer turn, append the last action to update after hand eval
                    else:
                        lastActions.append(playerAction)
                        lastPrevStates.append(self.gameState)
                        lastNewStates.append(newGameState)

                # Update the gamestate
                self.gameState = newGameState

            # Dealer turn
            else:
                vPrint("***** Dealer's turn ****** \n\n", self.verbose)
                for idx, hand in enumerate(self.gameState.getPlayerHands()):
                    vPrint(
                        "Player hand {}: {}\n".format(idx, hand.strFromHand()),
                        self.verbose)
                vPrint(
                    "Dealer's shown card: {}\n".format(
                        self.gameState.dealerHand.strFromHand()), self.verbose)

                # Get dealers action
                dealerAction = self.dealer.getAction(self.gameState)

                vPrint("Dealer action is {}\n".format(dealerAction),
                       self.verbose)

                # Take the action
                self.gameState = self.gameState.generateDealerSuccessor(
                    dealerAction)

        # Evaluate who won
        winStates = self.gameState.getWinState()
        totalBet = sum(self.gameState.getBets())
        payouts = [
            self.gameState.getPayout(winState, handIdx)
            for handIdx, winState in enumerate(winStates)
        ]

        # Update the qlearner with payouts based on their last actions
        if self.q:
            # Blackjack dealt so no actions, no update
            if len(lastActions) != len(payouts):
                pass
            else:
                # send an update for each tuple of (s,a,r,s')
                for i in range(len(payouts)):
                    reward = payouts[i]
                    action = lastActions[i]
                    orig_state = lastPrevStates[i]
                    new_state = lastNewStates[i]
                    self.gameState.player.update(orig_state, action, new_state,
                                                 reward)
            # Reset the lists of update items
            lastActions = []
            lastPrevStates = []
            lastNewStates = []

        # Get the total payout and apply it, return the results to the game loop
        # (sum replaces the former reduce(lambda ...) -- same result, clearer)
        payout = sum(payouts)

        # vPrint the results of each hand and total payout
        for idx, hand in enumerate(self.gameState.getPlayerHands()):
            vPrint(
                "=============\n\nHand {}:\nPlayer has {}, dealer has {}\n\nResult of hand is a {} for the player, payout is {}\n\n=============\n\n"
                .format(idx, hand.getHandValue(),
                        self.gameState.dealerHand.getHandValue(),
                        winStates[idx], payouts[idx]), self.verbose)
        vPrint("Total payout across all hands is {}\n".format(payout),
               self.verbose)

        self.gameState = self.gameState.applyPayout(payout)

        return (winStates, payout, totalBet)
Пример #34
0
from boss import Boss
from battle import Battle
from politechnikomon import Politechnikomon


def set_done():
    # NOTE(review): this assigns to a *local* variable, so the function is a
    # no-op -- the main loop below watches gameState.done instead.  It was
    # probably meant to flip that flag (or use `global done`); confirm intent
    # before changing.
    done = False


#Initialization
pygame.init()
screen = pygame.display.set_mode((800, 600))

#Data setup
# NOTE(review): `map` shadows the builtin of the same name for the rest of
# the module -- rename if the builtin is ever needed.
map = Map('map.png', 'tiles.png')
gameState = GameState()
gui = GUI(gameState, screen)
player = Player('player.png')
boss = Boss('boss.png')
# Creature roster: (sprite file, display name, numeric stat -- presumably
# level or HP, unverified from here).
ppaix = Politechnikomon('pppix.png', 'Ppaix', 20)
paichu = Politechnikomon('paichu.png', 'Paichu', 20)
oirposan = Politechnikomon('oirposan.png', 'Oirposan', 20)
niespanier = Politechnikomon('niespanier.png', 'Niespanier', 50)
map.generate_map()
# Pixel size of one map tile; `position` is the player's tile coordinate.
TILE_SIZE = 40
position = [0, 0]
#Main loop
while not gameState.done:
    if (gameState.mode == 'map'):
        screen.fill((0, 0, 0))