Example #1
    def setGhostPosition(self, gameState, ghostPosition, index):
        """
        Set the position of the ghost for this inference module to the specified
        position in the supplied gameState.

        Note that calling setGhostPosition does not change the position of the
        ghost in the GameState object used for tracking the true progression of
        the game.  The code in inference.py only ever receives a deep copy of
        the GameState object which is responsible for maintaining game state,
        not a reference to the original object.  Note also that the ghost
        distance observations are stored at the time the GameState object is
        created, so changing the position of the ghost will not affect the
        functioning of observe.
        """
        conf = game.Configuration(ghostPosition, game.Directions.STOP)
        gameState.data.agentStates[index] = game.AgentState(conf, False)
        return gameState
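
A minimal usage sketch of the pattern above, assuming hypothetical helper names (legalPositions and evaluate are not defined in this example): each candidate position is pinned in a deep copy, so the real game state is never modified.

    def scoreCandidatePositions(self, gameState, index):
        # Hypothetical sketch: score each legal position for the ghost at
        # `index` by pinning it there in a copied state.
        scores = util.Counter()
        for pos in self.legalPositions:
            hypothetical = self.setGhostPosition(gameState.deepCopy(), pos, index)
            scores[pos] = self.evaluate(hypothetical)  # assumed scoring helper
        return scores
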
Example #2
    def setEnemyPosition(self, gameState, pos, enemyIndex):
        foodGrid = self.getFood(gameState)
        halfway = foodGrid.width / 2
        conf = game.Configuration(pos, game.Directions.STOP)

        # Decide whether the enemy is currently a Pacman: an agent is a
        # Pacman when it stands on the opposing team's half of the board.
        if gameState.isOnRedTeam(self.index):
            if pos[0] >= halfway:
                isPacman = False
            else:
                isPacman = True
        else:
            if pos[0] >= halfway:
                isPacman = True
            else:
                isPacman = False
        gameState.data.agentStates[enemyIndex] = game.AgentState(conf, isPacman)

        return gameState
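
The side test above can be collapsed into a single expression. A hedged equivalent with a hypothetical helper name, using the same halfway convention (red's half is x < halfway):

    def isEnemyPacman(self, gameState, pos, halfway):
        # An agent is a Pacman when it stands on the opposing team's half.
        enemyIsRed = not gameState.isOnRedTeam(self.index)
        onRedHalf = pos[0] < halfway
        return onRedHalf != enemyIsRed
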
Example #3
    def test_limit_raise_sizes(self):
        self.initialize(game.Configuration(
            max_players=3, game_type=game.GameType.LIMIT, limits=(100, 200), blinds=(50, 100)))

        # pre-flop
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 0, {game.ActionType.CALL: (100, 100),
                                                 game.ActionType.RAISE: (100, 100),
                                                 game.ActionType.FOLD: None,})

        # flop
        check_call_all(self.manager)
        self.manager.proceed()
        self.assertEqual(game.GameState.FLOP_DEALT, self.manager.state)

        self.manager.act(game.Action(1, game.ActionType.BET, 100))
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 2, {game.ActionType.CALL: (100, 100),
                                                 game.ActionType.RAISE: (100, 100),
                                                 game.ActionType.FOLD: None,})

        # turn
        check_call_all(self.manager)
        self.manager.proceed()
        self.assertEqual(game.GameState.TURN_DEALT, self.manager.state)

        self.manager.act(game.Action(1, game.ActionType.BET, 200))
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 2, {game.ActionType.CALL: (200, 200),
                                                 game.ActionType.RAISE: (200, 200),
                                                 game.ActionType.FOLD: None,})

        # river
        check_call_all(self.manager)
        self.manager.proceed()
        self.assertEqual(game.GameState.RIVER_DEALT, self.manager.state)

        self.manager.act(game.Action(1, game.ActionType.BET, 200))
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 2, {game.ActionType.CALL: (200, 200),
                                                 game.ActionType.RAISE: (200, 200),
                                                 game.ActionType.FOLD: None,})
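
The expected sizes follow the usual fixed-limit convention: the small bet (100 here) applies pre-flop and on the flop, the big bet (200) on the turn and river. A hedged helper expressing that rule (hypothetical; not part of the tested API):

def limit_bet_size(street, limits):
    # limits = (small_bet, big_bet); small bet pre-flop/flop, big bet turn/river.
    small_bet, big_bet = limits
    return small_bet if street in ("preflop", "flop") else big_bet
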
Example #4
 def chooseAction(self, gameState):
     # Choose an action based on evaluated values of next possible states 
     myPosition = gameState.getAgentPosition(self.index)
     observedDistances = gameState.getAgentDistances()
     newState = gameState.deepCopy()
     for enemy in self.enemyIndices:
         enemyPosition = gameState.getAgentPosition(enemy)
         if enemyPosition:
             newDistribution = util.Counter()
             newDistribution[enemyPosition] = 1.0
             self.enemyPositionitionDistribution[enemy] = newDistribution
         else:
             self.forwardStep(enemy, gameState)
             self.observe(enemy, observedDistances, gameState)
     for enemy in self.enemyIndices:
         probablePosition = self.enemyPositionitionDistribution[enemy].argMax()
         conf = game.Configuration(probablePosition, Directions.STOP)
         newState.data.agentStates[enemy] = game.AgentState(conf, newState.isRed(probablePosition) != newState.isOnRedTeam(enemy))
     action = self.getMaxEvaluatedAction(newState, depth=2)[1]
     return action
Example #5
    def chooseAction(self, gameState):
        global trackers
        t1 = time.time()
        self.enemypos[0] = trackers[0].trackPosition(self, gameState)
        self.enemypos[1] = trackers[1].trackPosition(self, gameState)
        simulationState = gameState.deepCopy()

        #print (self.enemypos)

        for i in range(len(self.enemypos)):
            conf = game.Configuration(self.enemypos[i], Directions.STOP)
            simulationState.data.agentStates[
                self.enemies[i]] = game.AgentState(
                    conf,
                    simulationState.isRed(self.enemypos[i]) !=
                    simulationState.isOnRedTeam(self.enemies[i]))

        move = self.expecti_maximize(simulationState, 3)[1]
        t2 = time.time()
        return move
Example #6
    def newAction(self, gameState):
        """
        Base action-selection method. We begin by updating our beliefs and
        elapsing time over them. The beliefs can also be displayed on screen
        using the provided debugging function (commented out below).
        """

        myPos = gameState.getAgentPosition(self.index)
        noisyDistances = gameState.getAgentDistances()
        newState = gameState.deepCopy()

        for enemy in self.enemies:
            enemyPos = gameState.getAgentPosition(enemy)
            if enemyPos:
                new_belief = util.Counter()
                new_belief[enemyPos] = 1.0
                self.beliefs[enemy] = new_belief
            else:
                self.elapseTime(enemy, gameState)
                self.observe(enemy, noisyDistances, gameState)

        #  self.displayDistributionsOverPositions(self.beliefs.values())

        # Using the most probable position, update the game state: expectimax
        # needs a concrete starting position for each enemy.
        for enemy in self.enemies:
            probablePosition = self.beliefs[enemy].argMax()
            conf = game.Configuration(probablePosition, Directions.STOP)
            newState.data.agentStates[enemy] = game.AgentState(
                conf,
                newState.isRed(probablePosition) !=
                newState.isOnRedTeam(enemy))

        # Run expectimax to depth 2 and take the best action. This is the
        # deepest we can search within the time constraints.
        action = self.maxFunction(newState, depth=2)[1]

        return action
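
The observe and elapseTime helpers called above are not shown. A hedged sketch of the observation step they typically perform, assuming the capture framework's getDistanceProb and a self.legalPositions list maintained by the agent:

    def observe(self, enemy, noisyDistances, gameState):
        # Bayesian update of the belief over enemy positions from one noisy reading.
        myPos = gameState.getAgentPosition(self.index)
        updated = util.Counter()
        for pos in self.legalPositions:
            trueDistance = util.manhattanDistance(myPos, pos)
            prob = gameState.getDistanceProb(trueDistance, noisyDistances[enemy])
            updated[pos] = self.beliefs[enemy][pos] * prob
        updated.normalize()
        self.beliefs[enemy] = updated
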
Example #7
    def chooseAction(self, gameState):
        """
        Parent action selection method.
        This method updates the agent's map beliefs (position distributions
        for each enemy).
        @param gameState: the current GameState.
        @returns: str action with the selected move direction, e.g. North.
        """

        # Distances to the sounds heard. We have a list of 4 integers
        # that represent the distances.
        noisyDistances = gameState.getAgentDistances()

        newState = gameState.deepCopy()

        # For any enemy agent tries to get visual contact of enemy
        for enemy in self.enemies:
            # None if no visual contact else enemy position tuple
            enemyPos = gameState.getAgentPosition(enemy)
            if enemyPos:
                new_belief = util.Counter()
                new_belief[enemyPos] = 1.0
                self.beliefs[enemy] = new_belief
            else:
                # If not visual contact observe and move
                self.updateBeliefs(enemy)
                self.observe(enemy, noisyDistances, gameState)

        for enemy in self.enemies:
            prob_pos = self.beliefs[enemy].argMax()
            conf = game.Configuration(prob_pos, Directions.STOP)
            newState.data.agentStates[enemy] = game.AgentState(conf, newState.isRed(prob_pos) != newState.isOnRedTeam(
              enemy))

        # TODO: improve efficiency
        action = self.maxFunction(newState, depth=EXPECTIMAX_DEPTH)[1]

        return action
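
The updateBeliefs call (the time-elapse step) is likewise not shown. A hedged sketch of a one-step transition update, assuming a uniform move model over legal neighbouring squares and a self.walls grid held by the agent:

    def updateBeliefs(self, enemy):
        # Spread each position's belief uniformly over the squares the enemy
        # could reach in one move (getLegalNeighbors includes staying put).
        updated = util.Counter()
        for pos, prob in self.beliefs[enemy].items():
            neighbors = game.Actions.getLegalNeighbors(pos, self.walls)
            for nextPos in neighbors:
                updated[nextPos] += prob / float(len(neighbors))
        updated.normalize()
        self.beliefs[enemy] = updated
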
Example #8
    def test_simple_limit(self):
        self.initialize(game.Configuration(
            max_players=3, game_type=game.GameType.LIMIT, limits=(100, 200), blinds=(50, 100)))

        # pre flop
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 0, {game.ActionType.CALL: (100, 100),
                                                 game.ActionType.RAISE: (100, 100),
                                                 game.ActionType.FOLD: None,})

        self.manager.act(game.Action(0, game.ActionType.CALL))
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 1, {game.ActionType.CALL: (50, 50),
                                                 game.ActionType.RAISE: (100, 100),
                                                 game.ActionType.FOLD: None,})

        self.manager.act(game.Action(1, game.ActionType.CALL))
        allowed = self.manager.current_hand.allowed_action()
        self.assert_allowed_actions(allowed, 2, {game.ActionType.CHECK: None,
                                                 game.ActionType.RAISE: (100, 100),
                                                 game.ActionType.FOLD: None,})

        self.manager.act(game.Action(2, game.ActionType.CHECK))

        for expected_state, expected_bet in [[game.GameState.FLOP_DEALT, 100],
                                             [game.GameState.TURN_DEALT, 200],
                                             [game.GameState.RIVER_DEALT, 200]]:
            self.manager.proceed()
            self.assertEqual(expected_state, self.manager.state)
            for player_idx in [1, 2, 0]:
                allowed = self.manager.current_hand.allowed_action()
                self.assert_allowed_actions(allowed, player_idx,
                                            {game.ActionType.CHECK: None,
                                             game.ActionType.BET: (expected_bet, expected_bet),
                                             game.ActionType.FOLD: None})
                self.manager.act(game.Action(player_idx, game.ActionType.CHECK))
Example #9
def setGhostPositions(gameState, ghostPositions):
    "Sets the position of all ghosts to the values in ghostPositionTuple."
    for index, pos in enumerate(ghostPositions):
        conf = game.Configuration(pos, game.Directions.STOP)
        gameState.data.agentStates[index + 1] = game.AgentState(conf, False)
    return gameState
Example #10
 def setGhostPositions(self, gameState, ghostPositions):
     # ghostPositions here is a single (index, position) pair.
     index, pos = ghostPositions
     conf = game.Configuration(pos, game.Directions.STOP)
     gameState.data.agentStates[index] = game.AgentState(conf, False)
     return gameState
Example #11
 def setGhostPosition(self, state, ghostPosition, oppIndex):
     "Sets the position of all ghosts to the values in ghostPositionTuple."
     conf = game.Configuration(ghostPosition, game.Directions.STOP)
     state.data.agentStates[oppIndex] = game.AgentState(conf, False)
     return state
Example #12
  def chooseAction(self, gameState):
    self.startTime = time.clock()
    self.updateParticleFilters(gameState)
    # Get list of actions, and consider more actions if it's urgent
    actions = self.getGoodLegalActions(gameState, self.index)

    # Check for a death, setting defensive ghosts to attack mode if so
    #for i in self.enemyIndices:
    #  if (self.enemyLocFilters[i].getMostLikelyPos() == gameState.getInitialAgentPosition(i)
    #        and gameState.data.timeleft < 1147):
    #    print('EEOEE')

    # Check if the enemy is observable, changing strategy if necessary
    self.seenEnemies = []
    myPos = gameState.getAgentPosition(self.index)
    newStrat = None
    for i in self.enemyIndices:
      exactPosition = gameState.getAgentPosition(i)
      if exactPosition is not None: #and self.distancer.getDistance(myPos, exactPosition) < 4:
        self.seenEnemies.append(i)
        newStrat = self.evalStratChange(gameState, i)
      else:
        newStrat = "Attack"
    if newStrat is not None:
      self.strategies[self.index] = newStrat

    # Remove old food
    foodLocs = self.getFood(gameState).asList()
    for _, l in self.foodLists.iteritems():
      missingFood = [l1 for l1 in l if l1 not in foodLocs]
      for food in missingFood:
        l.remove(food)

    dist = util.Counter()
    l = 'Bottom' if self.isBottom else 'Top'
    for food in self.foodLists[l]: 
      dist[food] += 1
    dist.normalize()
    self.displayDistributionsOverPositions([dist])

    # Get current target if chasing
    if self.strategies[self.index] == 'Chase':
      minDistance = None
      closestEnemy = None
      for e in self.seenEnemies:
        distance = self.distancer.getDistance(gameState.getAgentPosition(self.index), 
                    gameState.getAgentPosition(e))
        if closestEnemy is None or distance < minDistance:
          minDistance = distance
          closestEnemy = e
      if closestEnemy is not None:
        self.currentTarget = self.getTarget(gameState, closestEnemy)

      dist = util.Counter()
      dist[self.currentTarget] += 1
      dist.normalize()
      self.displayDistributionsOverPositions([dist])

    # If there's only one action, just take it
    if len(actions) == 1:
      return actions[0]

    # Create simulated game state based on estimated enemy locations
    simState = gameState.deepCopy()
    for i in self.enemyIndices:
      if gameState.getAgentPosition(i) is None:
        mostLikelyPos = self.enemyLocFilters[i].getMostLikelyPos()
        conf = game.Configuration(mostLikelyPos, game.Directions.STOP)
        simState.data.agentStates[i] = game.AgentState(conf, False)


    bestAction = self.getBestAction(simState, 2, actions)
    return bestAction
Example #13
def setEnemyPositions(gameState, ghostPositions, enemyIndices):
  "Sets the position of all ghosts to the values in ghostPositionTuple."
  for i in range(len(ghostPositions)):
    conf = game.Configuration(ghostPositions[i], game.Directions.STOP)
    gameState.data.agentStates[enemyIndices[i]] = game.AgentState(conf, False)
  return gameState  
Example #14
 def setGhostPosition(self, gameState, ghostPosition):
     conf = game.Configuration(ghostPosition, game.Directions.STOP)
     gameState.data.agentStates[self.index] = game.AgentState(conf, False)
     return gameState
Example #15
 def setUp(self):
     self.manager = game.Manager(game.Configuration())
     self.recorder = game.RecordingListener()
     self.manager.add_listener(self.recorder)
Example #16
    def chooseAction(self, gameState):
        self.startTime = time.clock()
        self.myPos = gameState.getAgentPosition(self.index)
        self.updateParticleFilters(gameState)

        # Remove old food from personal lists
        isFoodRemoved = False
        for _, l in self.foodLists.iteritems():
            missingFood = [
                l1 for l1 in l if l1 not in self.getFood(gameState).asList()
            ]
            for food in missingFood:
                l.remove(food)
                isFoodRemoved = True

        # Compute position of the nearest food
        if len(self.foodList) > 3:
            self.closestFood = self.getClosestFood(self.foodList,
                                                   self.myPos)[0]
        else:
            self.closestFood = self.getClosestFood(
                self.getFood(gameState).asList(), self.myPos)[0]

        # Check if the enemy is observable
        self.seenEnemies = []
        for i in self.enemyIndices:
            exactPosition = gameState.getAgentPosition(i)
            if exactPosition is not None:
                self.seenEnemies.append(i)

        # Reset strategy to attack
        if gameState.getAgentState(self.index).isPacman or \
            self.myPos == gameState.getInitialAgentPosition(self.index) or (not self.seenEnemies and
            (self.strategies[self.index] == "Chase" or self.strategies[self.index] == "Scatter")):
            self.strategies[self.index] = "Attack"

        # Choose whether or not to change strategy based on seen enemies
        if self.seenEnemies:
            newStrat = None
            closestEnemyDist = None
            for e in self.seenEnemies:
                enemyPos = gameState.getAgentPosition(e)
                dist = self.distancer.getDistance(self.myPos, enemyPos)
                if closestEnemyDist is None or dist < closestEnemyDist:
                    self.closestEnemy = e
                    closestEnemyDist = dist
            newStrat = self.evalStratChange(gameState, self.closestEnemy)
            if newStrat is not None:
                self.strategies[self.index] = newStrat

        # Get possible actions
        if isFoodRemoved or not self.seenEnemies:
            actions = self.getGoodLegalActions(gameState, self.index, True)
        else:
            actions = self.getGoodLegalActions(gameState, self.index)
        ''' debugging pellets  
    dist = util.Counter()
    l = 'Bottom' if self.isBottom else 'Top'
    for food in self.foodLists[l]: 
      dist[food] += 1
    dist.normalize()
    self.displayDistributionsOverPositions([dist])
    '''
        ''' debugging target
    dist = util.Counter()
    dist[self.currentTarget] += 1
    dist.normalize()
    self.displayDistributionsOverPositions([dist])
    '''

        # If there's only one action, just take it
        if len(actions) == 1:
            return actions[0]

        # Create simulated game state based on estimated enemy locations
        simState = gameState.deepCopy()
        for i in self.enemyIndices:
            if gameState.getAgentPosition(i) is None:
                mostLikelyPos = self.enemyLocFilters[i].getMostLikelyPos()
                conf = game.Configuration(mostLikelyPos, game.Directions.STOP)
                simState.data.agentStates[i] = game.AgentState(conf, False)

        bestAction = self.getBestAction(simState, 2, actions)
        return bestAction