def setGhostPosition(self, gameState, ghostPosition, index):
        """
        Return a copy of gameState in which the ghost at `index` has been
        moved to `ghostPosition` (facing STOP).

        This never changes the real GameState that drives the game: the
        inference code only ever receives a deep copy of it. Note also that
        the noisy distance observations are captured when the GameState is
        created, so repositioning the ghost here does not affect observe().
        """
        haltedConfig = game.Configuration(ghostPosition, game.Directions.STOP)
        gameState.data.agentStates[index] = game.AgentState(haltedConfig, False)
        return gameState
예제 #2
0
    def setEnemyPosition(self, gameState, pos, enemyIndex):
        """
        Place the enemy agent `enemyIndex` at `pos` (facing STOP) and return
        the modified gameState.

        The enemy's isPacman flag is derived from which half of the board
        `pos` lies on: an opponent of a red-team agent is a Pacman on the
        red half (x below the midline), and the mirror image holds when we
        are on the blue team.
        """
        halfway = self.getFood(gameState).width / 2
        haltedConfig = game.Configuration(pos, game.Directions.STOP)

        # FOR THE WEIRD ERROR CHECK
        if gameState.isOnRedTeam(self.index):
            isPacman = pos[0] < halfway
        else:
            isPacman = pos[0] >= halfway
        gameState.data.agentStates[enemyIndex] = game.AgentState(haltedConfig, isPacman)

        return gameState
예제 #3
0
 def chooseAction(self, gameState):
     """Choose an action by evaluating the next possible states.

     Updates the belief distribution for every enemy (collapsing to a
     point distribution when the enemy is directly visible, otherwise
     elapsing time and folding in the noisy distance reading), pins each
     enemy to its most probable position in a deep-copied state, and runs
     a depth-2 evaluated search over that state.

     @param gameState: current game state.
     @return: the selected action string.
     """
     # Fix: the original also computed an unused `myPosition` local.
     observedDistances = gameState.getAgentDistances()
     newState = gameState.deepCopy()
     for enemy in self.enemyIndices:
         enemyPosition = gameState.getAgentPosition(enemy)
         if enemyPosition:
             # Enemy is visible: belief collapses to the exact position.
             newDistribution = util.Counter()
             newDistribution[enemyPosition] = 1.0
             self.enemyPositionitionDistribution[enemy] = newDistribution
         else:
             self.forwardStep(enemy, gameState)
             self.observe(enemy, observedDistances, gameState)
     # The search needs concrete enemy positions, so pin each enemy to the
     # argmax of its belief distribution.
     for enemy in self.enemyIndices:
         probablePosition = self.enemyPositionitionDistribution[enemy].argMax()
         conf = game.Configuration(probablePosition, Directions.STOP)
         newState.data.agentStates[enemy] = game.AgentState(
             conf,
             newState.isRed(probablePosition) != newState.isOnRedTeam(enemy))
     return self.getMaxEvaluatedAction(newState, depth=2)[1]
예제 #4
0
    def chooseAction(self, gameState):
        """Choose a move via expectimax over a simulated state.

        Asks the shared trackers for both enemies' estimated positions,
        writes those positions into a deep copy of the state (with an
        isPacman flag derived from which half of the board the position is
        on), and runs expecti-maximize to depth 3.

        Fix: removed unused timing locals (t1/t2 were measured but never
        used) and a commented-out debug print.
        """
        global trackers
        self.enemypos[0] = trackers[0].trackPosition(self, gameState)
        self.enemypos[1] = trackers[1].trackPosition(self, gameState)
        simulationState = gameState.deepCopy()

        # Pin each enemy at its tracked position; it is a Pacman exactly
        # when it stands on the opposing team's side.
        for i in range(len(self.enemypos)):
            conf = game.Configuration(self.enemypos[i], Directions.STOP)
            isPacman = (simulationState.isRed(self.enemypos[i]) !=
                        simulationState.isOnRedTeam(self.enemies[i]))
            simulationState.data.agentStates[self.enemies[i]] = \
                game.AgentState(conf, isPacman)

        return self.expecti_maximize(simulationState, 3)[1]
예제 #5
0
    def newAction(self, gameState):
        """
        Base choose action. Updates our beliefs over enemy positions
        (elapsing time for enemies we cannot see), pins each enemy to its
        most probable position in a copied state, and runs expectimax over
        that state.

        Fix: removed the unused local `myPos`.
        """
        noisyDistances = gameState.getAgentDistances()
        newState = gameState.deepCopy()

        for enemy in self.enemies:
            enemyPos = gameState.getAgentPosition(enemy)
            if enemyPos:
                # Directly visible: collapse the belief to a point mass.
                new_belief = util.Counter()
                new_belief[enemyPos] = 1.0
                self.beliefs[enemy] = new_belief
            else:
                self.elapseTime(enemy, gameState)
                self.observe(enemy, noisyDistances, gameState)

        #  self.displayDistributionsOverPositions(self.beliefs.values())

        # Using the most probable position, update the game state. In order
        # to use expectimax we need a set position where each enemy starts.
        for enemy in self.enemies:
            probablePosition = self.beliefs[enemy].argMax()
            conf = game.Configuration(probablePosition, Directions.STOP)
            newState.data.agentStates[enemy] = game.AgentState(
                conf,
                newState.isRed(probablePosition) !=
                newState.isOnRedTeam(enemy))

        # Depth 2 is the furthest we can search under the time constraints.
        return self.maxFunction(newState, depth=2)[1]
예제 #6
0
파일: myTeam.py 프로젝트: amatmv/PacmanAI
    def chooseAction(self, gameState):
        """
        Parent action selection method.
        Refreshes this agent's beliefs about enemy positions, pins each
        enemy to its most likely cell in a copied state, and searches it.
        @param: gameState string map of game.
        @returns str action with selected move direction ex. North
        """
        # Noisy sonar readings: a list of four integer distances.
        noisyDistances = gameState.getAgentDistances()

        newState = gameState.deepCopy()

        # For every enemy agent, try to get visual contact first.
        for enemy in self.enemies:
            # None when there is no visual contact, else a position tuple.
            enemyPos = gameState.getAgentPosition(enemy)
            if enemyPos:
                pinned = util.Counter()
                pinned[enemyPos] = 1.0
                self.beliefs[enemy] = pinned
            else:
                # No visual contact: elapse time, then observe.
                self.updateBeliefs(enemy)
                self.observe(enemy, noisyDistances, gameState)

        # Pin each enemy at the argmax of its belief; its isPacman flag is
        # true exactly when it stands on the opposing team's half.
        for enemy in self.enemies:
            bestGuess = self.beliefs[enemy].argMax()
            newState.data.agentStates[enemy] = game.AgentState(
                game.Configuration(bestGuess, Directions.STOP),
                newState.isRed(bestGuess) != newState.isOnRedTeam(enemy))

        # TODO imp effi
        return self.maxFunction(newState, depth=EXPECTIMAX_DEPTH)[1]
예제 #7
0
def setGhostPositions(gameState, ghostPositions):
    "Sets the position of all ghosts to the values in ghostPositionTuple."
    # Ghost agent slots start at 1 (slot 0 is Pacman).
    for offset, ghostPos in enumerate(ghostPositions):
        haltedConf = game.Configuration(ghostPos, game.Directions.STOP)
        gameState.data.agentStates[offset + 1] = game.AgentState(haltedConf, False)
    return gameState
예제 #8
0
 def setGhostPositions(self, gameState, ghostPositions):
     """Place a single ghost and return the modified gameState.

     NOTE(review): despite the plural name, `ghostPositions` is one
     (index, position) pair, and only that one ghost is moved.
     """
     ghostIndex, ghostPos = ghostPositions
     halted = game.Configuration(ghostPos, game.Directions.STOP)
     gameState.data.agentStates[ghostIndex] = game.AgentState(halted, False)
     return gameState
예제 #9
0
파일: myTeam.py 프로젝트: camilodoa/ai-ctf
 def setGhostPosition(self, state, ghostPosition, oppIndex):
     "Sets the position of all ghosts to the values in ghostPositionTuple."
     # Overwrite the opponent's agent state with a halted ghost at the
     # requested position (isPacman=False).
     haltedConf = game.Configuration(ghostPosition, game.Directions.STOP)
     state.data.agentStates[oppIndex] = game.AgentState(haltedConf, False)
     return state
예제 #10
0
  def chooseAction(self, gameState):
    """Top-level action selection.

    Updates the particle filters, records which enemies are observable
    (possibly switching strategy), prunes eaten food from the per-region
    lists, then runs a depth-2 search over a simulated state in which
    unseen enemies sit at their most likely filter positions.

    Fixes relative to the original:
    - `self.seenEnemies = []` was reset inside the enemy loop, discarding
      every sighting except the last enemy's; it is now initialized once.
    - `if not closestEnemy` misbehaved when an enemy index is 0 (0 is
      falsy); compare against None instead.
    - `len(actions) is 1` relied on CPython int identity; use `==`.
    - Removed a dead `bestAction = random.choice(actions)` that was
      immediately overwritten, an unused `myPos` loop local, and
      commented-out dead code.
    """
    self.startTime = time.clock()
    self.updateParticleFilters(gameState)
    # Get list of actions, and consider more actions if it's urgent
    actions = self.getGoodLegalActions(gameState, self.index)

    # Check if the enemy is observable, changing strategy if necessary.
    # NOTE(review): only the LAST enemy's result decides newStrat below;
    # preserved as-is since callers may rely on it — confirm intent.
    self.seenEnemies = []
    newStrat = None
    for i in self.enemyIndices:
      exactPosition = gameState.getAgentPosition(i)
      if exactPosition is not None:
        self.seenEnemies.append(i)
        newStrat = self.evalStratChange(gameState, i)
      else:
        newStrat = "Attack"
    if newStrat is not None:
      self.strategies[self.index] = newStrat

    # Remove old food
    foodLocs = self.getFood(gameState).asList()
    for _, l in self.foodLists.iteritems():
      missingFood = [l1 for l1 in l if l1 not in foodLocs]
      for food in missingFood:
        l.remove(food)

    # Debug display of the food region this agent is responsible for.
    dist = util.Counter()
    l = 'Bottom' if self.isBottom else 'Top'
    for food in self.foodLists[l]:
      dist[food] += 1
    dist.normalize()
    self.displayDistributionsOverPositions([dist])

    # Get current target if chasing: pick the closest seen enemy.
    if self.strategies[self.index] == 'Chase':
      minDistance = None
      closestEnemy = None
      myPos = gameState.getAgentPosition(self.index)
      for e in self.seenEnemies:
        distance = self.distancer.getDistance(myPos,
                    gameState.getAgentPosition(e))
        if closestEnemy is None or distance < minDistance:
          minDistance = distance
          closestEnemy = e
      if closestEnemy is not None:
        self.currentTarget = self.getTarget(gameState, closestEnemy)

      dist = util.Counter()
      dist[self.currentTarget] += 1
      dist.normalize()
      self.displayDistributionsOverPositions([dist])

    # If there's only one action, just take it
    if len(actions) == 1:
      return actions[0]

    # Create simulated game state based on estimated enemy locations
    simState = gameState.deepCopy()
    for i in self.enemyIndices:
      if gameState.getAgentPosition(i) is None:
        mostLikelyPos = self.enemyLocFilters[i].getMostLikelyPos()
        conf = game.Configuration(mostLikelyPos, game.Directions.STOP)
        simState.data.agentStates[i] = game.AgentState(conf, False)

    return self.getBestAction(simState, 2, actions)
예제 #11
0
def setEnemyPositions(gameState, ghostPositions, enemyIndices):
  "Sets the position of all ghosts to the values in ghostPositionTuple."
  # Pair each position with the corresponding enemy agent index.
  for i, enemyPos in enumerate(ghostPositions):
    haltedConf = game.Configuration(enemyPos, game.Directions.STOP)
    gameState.data.agentStates[enemyIndices[i]] = game.AgentState(haltedConf, False)
  return gameState
예제 #12
0
 def setGhostPosition(self, gameState, ghostPosition):
     """Overwrite this agent's own slot with a halted ghost state at
     `ghostPosition` and return the modified gameState."""
     halted = game.Configuration(ghostPosition, game.Directions.STOP)
     gameState.data.agentStates[self.index] = game.AgentState(halted, False)
     return gameState
예제 #13
0
    def chooseAction(self, gameState):
        """Top-level action selection.

        Updates particle filters, prunes eaten food from the per-region
        lists, computes the nearest food target, picks/keeps a strategy
        based on visible enemies, then runs a depth-2 search over a
        simulated state in which unseen enemies sit at their most likely
        filter positions.

        Fixes relative to the original:
        - `if not closestEnemyDist` misbehaved when the closest distance
          was 0 (falsy), re-opening the comparison; compare against None.
        - `len(actions) is 1` relied on CPython int identity; use `==`.
        - Removed a dead `bestAction = random.choice(actions)` that was
          immediately overwritten, and triple-quoted dead debug blocks.
        - Hoisted the loop-invariant `self.getFood(gameState).asList()`.
        """
        self.startTime = time.clock()
        self.myPos = gameState.getAgentPosition(self.index)
        self.updateParticleFilters(gameState)

        # Remove old food from personal lists
        isFoodRemoved = False
        currentFood = self.getFood(gameState).asList()
        for _, regionFood in self.foodLists.iteritems():
            missingFood = [f for f in regionFood if f not in currentFood]
            for food in missingFood:
                regionFood.remove(food)
                isFoodRemoved = True

        # Compute position of the nearest food; fall back to the full food
        # list once our personal list is nearly exhausted.
        if len(self.foodList) > 3:
            self.closestFood = self.getClosestFood(self.foodList,
                                                   self.myPos)[0]
        else:
            self.closestFood = self.getClosestFood(currentFood,
                                                   self.myPos)[0]

        # Check which enemies are directly observable this turn.
        self.seenEnemies = [
            i for i in self.enemyIndices
            if gameState.getAgentPosition(i) is not None
        ]

        # Reset strategy to attack
        if gameState.getAgentState(self.index).isPacman or \
            self.myPos == gameState.getInitialAgentPosition(self.index) or (not self.seenEnemies and
            (self.strategies[self.index] == "Chase" or self.strategies[self.index] == "Scatter")):
            self.strategies[self.index] = "Attack"

        # Choose whether or not to change strategy based on the closest
        # visible enemy.
        if self.seenEnemies:
            closestEnemyDist = None
            for e in self.seenEnemies:
                enemyPos = gameState.getAgentPosition(e)
                dist = self.distancer.getDistance(self.myPos, enemyPos)
                if closestEnemyDist is None or dist < closestEnemyDist:
                    self.closestEnemy = e
                    closestEnemyDist = dist
            newStrat = self.evalStratChange(gameState, self.closestEnemy)
            if newStrat is not None:
                self.strategies[self.index] = newStrat

        # Get possible actions; widen the set when food just disappeared
        # or no enemy is in sight.
        if isFoodRemoved or not self.seenEnemies:
            actions = self.getGoodLegalActions(gameState, self.index, True)
        else:
            actions = self.getGoodLegalActions(gameState, self.index)

        # If there's only one action, just take it
        if len(actions) == 1:
            return actions[0]

        # Create simulated game state based on estimated enemy locations
        simState = gameState.deepCopy()
        for i in self.enemyIndices:
            if gameState.getAgentPosition(i) is None:
                mostLikelyPos = self.enemyLocFilters[i].getMostLikelyPos()
                conf = game.Configuration(mostLikelyPos, game.Directions.STOP)
                simState.data.agentStates[i] = game.AgentState(conf, False)

        return self.getBestAction(simState, 2, actions)