class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        distances = inference.DiscreteDistribution()
        for pos in [x.argMax() for x in livingGhostPositionDistributions]:
            distances[pos] = -self.distancer.getDistance(pacmanPosition, pos)
        targetPos = distances.argMax()

        distances = inference.DiscreteDistribution()
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            distances[action] = -self.distancer.getDistance(targetPos, \
                                    successorPosition)
        return distances.argMax()
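
The example above picks minima by storing negative distances in a DiscreteDistribution and calling argMax. A minimal equivalent sketch, assuming the belief objects expose argMax() as in util.Counter / DiscreteDistribution, does the same selection with key-based min calls and no sign flip:

        # Sketch only: reuses the variables defined in chooseAction above;
        # beliefs.argMax() is assumed to return the most probable position.
        ghostPositions = [beliefs.argMax() for beliefs in livingGhostPositionDistributions]
        targetPos = min(ghostPositions,
                        key=lambda pos: self.distancer.getDistance(pacmanPosition, pos))
        return min(legal,
                   key=lambda action: self.distancer.getDistance(
                       targetPos, Actions.getSuccessor(pacmanPosition, action)))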
Example #2
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        maxBeliefPos = [max(l, key = l.get) for l in livingGhostPositionDistributions]
        closestGhost = min(maxBeliefPos, key = lambda pos: self.distancer.getDistance(pacmanPosition, pos)) 
        successorPositions = [(Actions.getSuccessor(pacmanPosition, action), action) for action in legal]
        closestAction = min([(self.distancer.getDistance(closestGhost, pos), action) for pos, action in successorPositions])[1]
        return closestAction
Example #3
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        closestPos, minDistance = None, float('inf')
        for livingGhostPositionDistribution in livingGhostPositionDistributions:
            likelyPosition = self.findLikelyPosition(livingGhostPositionDistribution)
            distance = self.distancer.getDistance(pacmanPosition,likelyPosition)
            if distance < minDistance:
                minDistance, closestPos = distance, likelyPosition
        bestAction, bestDistance = None, float('inf')
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            successorDistance = self.distancer.getDistance(successorPosition, closestPos)
            if successorDistance < bestDistance:
                bestDistance, bestAction = successorDistance, action
        return bestAction

    def findLikelyPosition(self, livingGhostPositionDistribution):
        max_prob, max_pos = 0, None
        for pos, prob in livingGhostPositionDistribution.items():
            if prob > max_prob:
                max_prob, max_pos = prob, pos
        return max_pos
Example #4
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions()]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i,beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]
        
    "*** YOUR CODE HERE ***"
    closestDist = None
    closestGhostPos = None
    for dist in livingGhostPositionDistributions:
        pos = max(dist.keys(), key=lambda p: dist[p])  # most likely position for this ghost
        distance = self.distancer.getDistance(pacmanPosition, pos)
        if closestDist is None or distance < closestDist:
            closestGhostPos = pos
            closestDist = distance

    actionDistances = [(a, self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, a), closestGhostPos)) for a in legal]
    bestAction = min(actionDistances, key=lambda ad: ad[1])[0]
    
    return bestAction
Example #5
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        maxPositions = [belief.argMax() for belief in livingGhostPositionDistributions]
        dist = [self.distancer.getDistance(pos,pacmanPosition) for pos in maxPositions]
        minPos = maxPositions[dist.index(min(dist))]
        successorDistance = [self.distancer.getDistance(minPos,Actions.getSuccessor(pacmanPosition, action)) for action in legal]
        return legal[successorDistance.index(min(successorDistance))]
Example #6
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        ghost_dist = {}
        for ghost_distribution in livingGhostPositionDistributions:
          most_likely_spot = max(ghost_distribution, key=ghost_distribution.get)
          ghost_dist[most_likely_spot] = self.distancer.getDistance(pacmanPosition, most_likely_spot)

        closest_ghost = min(ghost_dist, key=ghost_dist.get)

        possible_actions = {}
        for action in gameState.getLegalActions():
          next_pacman_spot = Actions.getSuccessor(pacmanPosition, action)
          possible_actions[action] = self.distancer.getDistance(next_pacman_spot, closest_ghost)
        best_action = min(possible_actions, key=possible_actions.get)

        return best_action
Example #7
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions()]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i,beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]
    "*** YOUR CODE HERE ***"
    closest = None
    minDist = float("inf")
    for x in livingGhostPositionDistributions:
      mostProb = x.argMax()
      dist = self.distancer.getDistance(pacmanPosition, mostProb)
      if dist < minDist:
        minDist = dist
        closest = mostProb
    action = None
    minDist = float("inf")
    for act in legal:
      dist = self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, act), closest)
      if dist < minDist:
        minDist = dist
        action = act
    return action
Example #8
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def mydistance(self, x, y, pacmanPosition):
        ghostLocation = (x, y)
        return self.distancer.getDistance(pacmanPosition, ghostLocation)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that
        has not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (in maze distance!).

        To find the maze distance between any two positions, use:
        self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
        successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief distributions
        for each of the ghosts that are still alive.  It is defined based
        on (these are implementation details about which you need not be
        concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.

        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs) if livingGhosts[i + 1]
        ]

        ghostPositions = [max(distribution, key=distribution.get) for distribution in livingGhostPositionDistributions]
        print "ghostPositions", ghostPositions
        closestGhostPosition = min(ghostPositions, key=lambda x: self.distancer.getDistance(pacmanPosition, x))
        print "closestGhostPosition", closestGhostPosition
        successorPositions = [Actions.getSuccessor(pacmanPosition, action) for action in legal]
        closestSuccessorPosition = min(
            successorPositions, key=lambda x: self.distancer.getDistance(closestGhostPosition, x)
        )
        move = legal[successorPositions.index(closestSuccessorPosition)]

        return move
Example #9
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        # get max position for every ghost
        def getMaxDictKey(d):
            maxKeys = [k for k in d.keys() if d[k] == max(d.values())]
            return maxKeys[0]

        maxPos = [getMaxDictKey(k) for k in livingGhostPositionDistributions]
        distances = [self.distancer.getDistance(x, pacmanPosition) for x in maxPos]
        minDist = min(distances)
        goalPos = maxPos[distances.index(minDist)]
        for action in legal:
            if self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, action), goalPos) < minDist:
                return action
        # should not end here!
        return legal[0]
Example #10
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)
        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)
        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):
          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).
          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"

        mostPossiblePosition = lambda ghostPosDist: max(ghostPosDist.items(), key=lambda x: x[1])[0]
        mostPossiblePositions = map(mostPossiblePosition, livingGhostPositionDistributions)
        distToPacman = lambda x: self.distancer.getDistance(pacmanPosition, x)
        closestGhostPos = min(mostPossiblePositions, key=distToPacman)
        minDist = self.distancer.getDistance(pacmanPosition, closestGhostPos)
        delta_pos = set()
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            delta = self.distancer.getDistance(closestGhostPos, successorPosition)-minDist  # the more negative the better
            delta_pos.add((delta, action))
        return min(delta_pos)[1]
        # util.raiseNotDefined()
Example #11
 def observe(self, gameState, selfIndex, agentIndex):
   noisyAgentDistance = gameState.getAgentDistances()[agentIndex]
   
   distribution = Counter()
   pacmanPosition = gameState.getAgentState(selfIndex).getPosition()
   distancer = Distancer(gameState.data.layout)
   for x in xrange(gameState.data.layout.width):
     for y in xrange(gameState.data.layout.height):
       if not gameState.hasWall(x, y):
         position = (x, y)
         trueDistance = distancer.getDistance(position, pacmanPosition)
         distribution[position] = gameState.getDistanceProb(trueDistance, noisyAgentDistance) * self.distributions[agentIndex][position]
   distribution.normalize()
   self.distributions[agentIndex] = distribution
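
The observe method above is a standard Bayesian filtering update: for every non-wall position, the new belief is proportional to the probability of the noisy distance reading given the true maze distance to that position, multiplied by the prior belief, and the result is renormalized. A standalone sketch of the same update on plain dictionaries (hypothetical helper names, independent of the game classes):

        def bayesFilterUpdate(prior, positions, emissionProb):
            # prior:        dict mapping position -> prior belief
            # positions:    iterable of candidate (non-wall) positions
            # emissionProb: function(position) -> P(observation | position)
            posterior = {p: emissionProb(p) * prior.get(p, 0.0) for p in positions}
            total = sum(posterior.values())
            if total > 0:
                posterior = {p: v / total for p, v in posterior.items()}
            return posterior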
Example #12
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        ghostPosition = [dist.argMax() for dist in livingGhostPositionDistributions]
        ghostDistance = [self.distancer.getDistance(pacmanPosition, pos) for pos in ghostPosition]
        closestGhostIndex = ghostDistance.index(min(ghostDistance))
        allPacPotentialPos = [Actions.getSuccessor(pacmanPosition, action) for action in legal]
        distanceToClosestGhostAfterAllActions = \
            [self.distancer.getDistance(newPacPos, ghostPosition[closestGhostIndex])
             for newPacPos in allPacPotentialPos]
        return legal[distanceToClosestGhostAfterAllActions.index(min(distanceToClosestGhostAfterAllActions))]
Example #13
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
    pacman = gameState.getPacmanPosition()
    legal  = [a for a in gameState.getLegalPacmanActions() if a != Directions.STOP]
    ghosts = gameState.getLivingGhosts()
    distributions = [b for i, b in enumerate(self.ghostBeliefs) if ghosts[i + 1]]

    successor = [(Actions.getSuccessor(pacman, a), a) for a in legal]
    positions = [max(d.items(), key=lambda x:x[1])[0] for d in distributions]
    distances = [(self.distancer.getDistance(pacman, d), d) for d in positions]
    choice    = min(distances)[1]   # min distance between current and any ghost
    actions   = [(self.distancer.getDistance(choice, s[0]), s[1]) for s in successor]
    action    = min(actions)[1]     # the action that gets us closer to the close ghost
    return action
Example #14
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    To get a list of booleans, one for each agent, indicating whether
    or not the agent is alive, use gameState.getLivingGhosts()
    Note that pacman is always agent 0, so the ghosts are agents 1, 
    onwards (just as before).
     
    You may remove Directions.STOP from the list of available actions.
    """
    
    mostLikelyGhostPositions = util.Counter()
    for i in range(0,len(gameState.getLivingGhosts())):
      if (gameState.getLivingGhosts()[i]):
         mostLikelyGhostPositions[i] = self.inferenceModules[i-1].getBeliefDistribution().argMax()
    
    minDist = 100000000
    minIndex = 1
    pacmanPosition = gameState.getPacmanPosition()
    for ghostIndex in mostLikelyGhostPositions:
      dist = self.distancer.getDistance(pacmanPosition,mostLikelyGhostPositions[ghostIndex])
      if dist < minDist:
        minDist = dist
        minIndex = ghostIndex

    legal = [a for a in gameState.getLegalPacmanActions() if a != Directions.STOP]
    legalMoves = util.Counter()
    minGhostPos = mostLikelyGhostPositions[minIndex]
    for action in legal:
       legalMoves[action] = -1*self.distancer.getDistance(minGhostPos,Actions.getSuccessor(pacmanPosition,action))
    return legalMoves.argMax()
Example #15
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    You may remove Directions.STOP from the list of available actions.
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions() if a != Directions.STOP]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i,beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]
    "*** YOUR CODE HERE ***"
    minDist = 99999999
    action = 'Stop'

    for la in legal:
      newPacPos = Actions.getSuccessor(pacmanPosition, la)

      for i in range(0, len(livingGhostPositionDistributions)):
        ghostPos = livingGhostPositionDistributions[i].argMax()
        minDist, action = min((minDist, action), (self.distancer.getDistance(newPacPos, ghostPos), la))

    return action
Example #16
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        
        # Get the position of the most likely ghost
        max_prob_pos = []
        for ghost_dist in livingGhostPositionDistributions:
            best_pos = max(ghost_dist, key = ghost_dist.get)
            max_prob_pos.append((best_pos,ghost_dist[best_pos]))
        best_pos = max(max_prob_pos, key=lambda x : x[1])[0]

        # Find the best move
        best_dist = float("inf")
        best_move = None
        for move in legal:
            dist = self.distancer.getDistance(best_pos, Actions.getSuccessor(pacmanPosition, move))
            if dist < best_dist or not best_move:
                best_move = move 
                best_dist = dist
        
        return best_move
Example #17
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        
        #initialize them to worst value possible
        closestDis, minMazeDis = float('inf'), float('inf')
        closestPos = None
        theAction = None

        #find the closest ghost
        for oneSetDistri in livingGhostPositionDistributions:
            biggestDistriPos = oneSetDistri.argMax()
            mazeDis = self.distancer.getDistance(pacmanPosition, biggestDistriPos)

            if (closestDis > mazeDis):
                closestDis = mazeDis
                closestPos = biggestDistriPos

        #greedy approach: always seek for action with closest distance to get to the ghost
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            mazeDis = self.distancer.getDistance(successorPosition, closestPos)

            if (minMazeDis > mazeDis):
                minMazeDis = mazeDis
                theAction = action

        return theAction
Example #18
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        m_dist = lambda pos1, pos2: self.distancer.getDistance(pos1, pos2)
        closest = None      # (position, distance) of the closest ghost
        best_action = None  # (action, successor position, distance)
        for distrib in livingGhostPositionDistributions:
            mostLikelyPos = max(distrib.items(),
                                key=lambda item: item[1])[0]
            dist = m_dist(pacmanPosition, mostLikelyPos)
            if closest is None or dist < closest[1]:
                closest = (mostLikelyPos, dist)
        for action in legal:
            succ_pos = Actions.getSuccessor(pacmanPosition, action)
            dist = m_dist(succ_pos, closest[0])
            if best_action is None or dist < best_action[2]:
                best_action = (action, succ_pos, dist)
        return best_action[0]
Example #19
    def registerInitialState(self, gameState):
        "Initializes beliefs and inference modules"
        import __main__
        self.display = __main__._display
        self.distancer = Distancer(gameState.data.layout, False)

        for inference in self.inferenceModules: inference.initialize(gameState)
        self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
        self.firstMove = True
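
The snippet above only initializes the belief distributions. A rough sketch of how they might be refreshed once per turn, assuming each inference module exposes elapseTime, observe and getBeliefDistribution as in the examples in this listing (exact method names and signatures vary between versions of the project):

        def updateBeliefs(self, gameState):
            # Assumed interface: elapseTime advances the motion model,
            # observe folds in the latest noisy distance reading.
            for index, inf in enumerate(self.inferenceModules):
                if not self.firstMove:
                    inf.elapseTime(gameState)
                inf.observe(gameState)
                self.ghostBeliefs[index] = inf.getBeliefDistribution()
            self.firstMove = False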
Example #20
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions()]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i,beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]
    "*** YOUR CODE HERE ***"
    bestaction = Directions.STOP
    bestdistance = None
    for action in legal:
        nextpos = Actions.getSuccessor(pacmanPosition, action)
        for beliefs in livingGhostPositionDistributions:
            ghostpos = max(beliefs.items(), key=lambda item: item[1])[0]
            dist = self.distancer.getDistance(nextpos, ghostpos)
            if bestdistance is None or dist < bestdistance:
                bestdistance = dist
                bestaction = action

    return bestaction
Example #21
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        localMax = []
        for belief in livingGhostPositionDistributions:
            localMax.append(belief.argMax())
        goalCoordinate, goalProbability = None, 0
        for index, coordinate in enumerate(localMax):
            if livingGhostPositionDistributions[index][coordinate] >= goalProbability:
                goalCoordinate, goalProbability = coordinate, livingGhostPositionDistributions[index][coordinate]

        tempActions = []
        for action in legal:
            nextLocation = Actions.getSuccessor(pacmanPosition, action)
            tempActions.append((self.distancer.getDistance(nextLocation, goalCoordinate), action))
        return min(tempActions)[1]
Example #22
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"

        mostLikely = util.Counter()
        for beliefs in livingGhostPositionDistributions:
            likelyPos = beliefs.argMax()
            mostLikely[likelyPos] = beliefs[likelyPos]
        minDist = float('inf')
        closest = None
        for key in mostLikely:
            curDist = self.distancer.getDistance(pacmanPosition,key)
            if curDist < minDist:
                minDist = curDist
                closest = key
        ghostPos = closest
        minDist = float('inf')
        bestMove = None
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            dist = self.distancer.getDistance(ghostPos,successorPosition)
            if dist < minDist:
                bestMove = action
                minDist = dist
        return bestMove
Example #23
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        distance = float('inf')

        for ghost in livingGhostPositionDistributions:
            highest_prob = -1
            for key in ghost:
                if ghost[key] > highest_prob:
                    highest_prob = ghost[key]
                    ghost_loc = key
            new_dist = self.distancer.getDistance(pacmanPosition, ghost_loc)
            if new_dist < distance:
                distance = new_dist
                closest = ghost_loc

        distance = float('inf')
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            new_dist = self.distancer.getDistance(successorPosition, closest)
            if new_dist < distance:
                best_action = action
                distance = new_dist
        return best_action
Example #24
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        # print legal
        livingGhosts = gameState.getLivingGhosts()
        # noisyDistances = gameState.getNoisyGhostDistances()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        for inf_mod in self.inferenceModules:
            inf_mod.elapseTime(gameState)
        dist = util.Counter()
        for belief in livingGhostPositionDistributions:
            for p, val in belief.items():
                dist[p] += val
        most_likely_pos = max(dist.items(), key=lambda x: x[1])[0]
        states = [(a, Actions.getSuccessor(pacmanPosition, a)) for a in legal]
        actions = [(a, self.distancer.getDistance(state, most_likely_pos)) for a, state in states]
        return min(actions, key=lambda x: x[1])[0]
Example #25
class GreedyBustersAgent(BustersAgent):
    """An agent that charges the closest ghost."""

    def registerInitialState(self, gameState):
        """Pre-computes the distance between every two points."""
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        actions = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [beliefs for i, beliefs in enumerate(self.ghostBeliefs) if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        if len(actions) == 1:
            best_action = actions[0]
        else:
            n_ghosts = len(livingGhostPositionDistributions)
            positions = livingGhostPositionDistributions[0].iterkeys()
            positionDistributions = {p: max(livingGhostPositionDistributions[i][p] for i in xrange(n_ghosts)) for p in positions}
            target = max(positionDistributions.iterkeys(), key=positionDistributions.get)

            def d(action):
                next_position = Actions.getSuccessor(pacmanPosition, action)
                return self.distancer.getDistance(next_position, target)
            best_action = min(actions, key=d)
        return best_action
Example #26
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs) if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        minDistance = float("inf")

        for i in range(len(livingGhostPositionDistributions)):
            ghostPosition = livingGhostPositionDistributions[i].argMax()
            ghostDistance = self.distancer.getDistance(pacmanPosition, ghostPosition)
            if minDistance > ghostDistance:
                minDistance = ghostDistance
                closestPosition = ghostPosition

        minDistance = float("inf")

        for a in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, a)
            ghostDistance = self.distancer.getDistance(closestPosition, successorPosition)
            if minDistance > ghostDistance:
                minDistance = ghostDistance
                action = a

        return action
Example #27
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        #find closest position
        minDistance = None
        for dist in livingGhostPositionDistributions:
            mostLikelyPosition = dist.argMax()
            distance = self.distancer.getDistance(pacmanPosition, mostLikelyPosition)
            if minDistance is None or distance < minDistance:  # 'is None' so a distance of 0 is not treated as unset
                minDistance = distance
                closestPosition = mostLikelyPosition

        #find best action
        minDistance = None
        for action in legal:
            newPos = Actions.getSuccessor(pacmanPosition, action)
            distanceAfterAction = self.distancer.getDistance(newPos, closestPosition)
            if minDistance is None or distanceAfterAction < minDistance:
                minDistance = distanceAfterAction
                bestAction = action

        return bestAction
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        distance = float("inf")
        ghost_position = None
        for dist in livingGhostPositionDistributions:
            temp_pos = dist.argMax()
            temp_distance = self.distancer.getDistance(pacmanPosition, temp_pos)
            if temp_distance < distance:
                distance = temp_distance
                ghost_position = temp_pos

        dist = float("inf")
        action = None
        for a in legal:
            succ_pos = Actions.getSuccessor(pacmanPosition, a)
            temp = self.distancer.getDistance(succ_pos, ghost_position)
            if temp < dist:
                dist = temp
                action = a
        return action
Exemple #29
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [beliefs for i,beliefs
                                            in enumerate(self.ghostBeliefs)
                                            if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        # Get the best distance to each ghost
        best_positions = []
        distances = []
        for ghost_distribution in livingGhostPositionDistributions:
            best_position = ghost_distribution.argMax()
            distances.append(self.distancer.getDistance(pacmanPosition, best_position))
            best_positions.append(best_position)

        idx = distances.index(min(distances))
        target = best_positions.pop(idx)

        actions = []
        distances = []
        for action in legal:
            new_position = Actions.getSuccessor(pacmanPosition, action)
            distances.append(self.distancer.getDistance(new_position, target))
            actions.append(action)

        idx = distances.index(min(distances))
        best_action = actions.pop(idx)
        return best_action
Exemple #30
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        ghosts = [pos.argMax() for pos in livingGhostPositionDistributions]

        # Greedy nearest-ghost scan (not actually Dijkstra's algorithm)
        toReturn = None
        for ghost in ghosts:
            distance = self.distancer.getDistance(pacmanPosition, ghost)
            for action in legal:
                new_dist = self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, action), 
                    ghost)
                # if this successor gives the smallest distance seen so far for this ghost,
                # choose the action that moves Pacman closer to it
                if new_dist < distance:
                    distance = new_dist
                    toReturn = action
        return toReturn
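
One caveat in the scan above: toReturn stays None when no legal successor strictly reduces the distance to any ghost (for instance when a believed ghost position coincides with Pacman's own square), and returning None from chooseAction would crash the game. A self-contained sketch of the usual guard, with purely illustrative names, defaulting to the first candidate:

def pick_improving(candidates, current_cost, cost_after):
    # Keep the best strictly improving candidate, but never return None.
    best = candidates[0] if candidates else None
    best_cost = current_cost
    for c in candidates:
        if cost_after[c] < best_cost:
            best_cost = cost_after[c]
            best = c
    return best

assert pick_improving(['North', 'South'], 3, {'North': 2, 'South': 4}) == 'North'
assert pick_improving(['North', 'South'], 0, {'North': 1, 'South': 1}) == 'North'  # fallback case
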
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        action = legal[0]
        leastDist = 1000
        ghostpos = []
        # Find the most likely position of each living ghost
        for counter in livingGhostPositionDistributions:
            maxProb = 0
            keys = counter.keys()
            pos = keys[0]
            for key in keys:
                if counter[key] > maxProb:
                    pos = key
                    maxProb = counter[key]
            ghostpos.append(pos)

        # Choose the legal action whose successor square is closest to any of those positions
        for nextAction in legal:
            successor = Actions.getSuccessor(pacmanPosition, nextAction)
            for myghost in ghostpos:
                dist = self.distancer.getDistance(myghost, successor)
                if dist < leastDist:
                    leastDist = dist
                    action = nextAction

        return action
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """

        """localMax = []
        for belief in livingGhostPositionDistributions:
            localMax.append(belief.argMax())
        goalCoordinate, goalProbability = None, 0
        for index, coordinate in enumerate(localMax):
            if livingGhostPositionDistributions[index][coordinate] >= goalProbability:
                goalCoordinate, goalProbability = coordinate, livingGhostPositionDistributions[index][coordinate]

        temp = []
        for action in legal:
            nextLocation = Actions.getSuccessor(pacmanPosition, action)
            temp.append((self.distancer.getDistance(nextLocation, goalCoordinate), action))
        return min(temp)[1]"""
        """
        pacmanPosition = gameState.getPacmanPosition()
        actions = [agent for agent in gameState.getLegalPacmanActions()]

        # get possible ghost position according to the distribution
        live_ghosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [beliefs for i, beliefs in enumerate(self.ghostBeliefs) if live_ghosts[i+1]]
        pos_ghosts = []
        for distribution in livingGhostPositionDistributions:
            m_dis, t_pos, t_dis = 0, 0, None
            for key in distribution.keys():
                if distribution[key] > m_dis:
                    m_dis = distribution[key]
                    t_pos = key
                    t_dis = self.distancer.getDistance(pacmanPosition, key)
            pos_ghosts.append((t_pos, t_dis))

        # get closest ghosts
        closest_ghost = min(pos_ghosts, key=lambda x: x[1])[0]

        # get the best action towards the ghost
        successors = [(Actions.getSuccessor(pacmanPosition, action), action) for action in actions]
        best_action = min(successors, key=lambda x: self.distancer.getDistance(x[0], closest_ghost))[1]

        return best_action"""
        pacman = gameState.getPacmanPosition()
        legal  = [a for a in gameState.getLegalPacmanActions() if a != Directions.STOP]
        ghosts = gameState.getLivingGhosts()
        distributions = [b for i, b in enumerate(self.ghostBeliefs) if ghosts[i + 1]]

        successor = [(Actions.getSuccessor(pacman, a), a) for a in legal]
        positions = [max(d.items(), key=lambda x:x[1])[0] for d in distributions]
        distances = [(self.distancer.getDistance(pacman, d), d) for d in positions]
        choice    = min(distances)[1]   # min distance between current and any ghost
        actions   = [(self.distancer.getDistance(choice, s[0]), s[1]) for s in successor]
        action    = min(actions)[1]     # the action that gets us closer to the closest ghost
        return action
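
Note that taking min over (distance, item) tuples, as done twice above, compares the second element whenever two distances tie, so equally close ghost positions are ordered coordinate-wise and equally good actions alphabetically. A self-contained illustration:

# Two actions at the same distance: the tuple comparison falls through to the
# action string, so 'East' wins purely by alphabetical order.
pairs = [(3, 'North'), (3, 'East'), (5, 'West')]
assert min(pairs) == (3, 'East')
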
Exemple #34
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that
        has not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (in maze distance!).

        To find the maze distance between any two positions, use:
        self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
        successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief distributions
        for each of the ghosts that are still alive.  It is defined based
        on (these are implementation details about which you need not be
        concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.

        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()  # index 0 refers to Pacman itself
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]

        livingGhostPos = []
        closestPos = None
        closestD = float('inf')
        bestAction = None

        for ghost in livingGhostPositionDistributions:
            bestProb = 0
            bestPos = None
            for pos, prob in ghost.items():
                if prob >= bestProb:
                    bestProb = prob
                    bestPos = pos
            livingGhostPos.append(bestPos)

        for x, y in livingGhostPos:
            d = self.distancer.getDistance(pacmanPosition, (x, y))
            if (d < closestD):
                closestD = d
                closestPos = (x, y)

        for action in legal:
            successor = Actions.getSuccessor(pacmanPosition, action)
            newD = self.distancer.getDistance(successor, closestPos)
            if newD < closestD:
                closestD = newD
                bestAction = action

        return bestAction
Exemple #35
class BustersKeyboardAgent(BustersAgent, KeyboardAgent):
    "An agent controlled by the keyboard that displays beliefs about ghost positions."

    def __init__(self,
                 index=0,
                 inference="KeyboardInference",
                 ghostAgents=None):
        KeyboardAgent.__init__(self, index)
        BustersAgent.__init__(self, index, inference, ghostAgents)
        self.countActions = 0

    def getAction(self, gameState):
        return BustersAgent.getAction(self, gameState)

    def chooseAction(self, gameState):
        global last_move
        global distWest
        global distEast
        global distNorth
        global distSouth

        distWest = 99999
        distEast = 99999
        distNorth = 99999
        distSouth = 99999

        self.distancer = Distancer(gameState.data.layout, False)
        self.countActions = self.countActions + 1
        last_move = KeyboardAgent.getAction(self, gameState)

        # Store a few useful pieces of game data in variables
        legal = gameState.getLegalActions(0)  ## legal actions for Pacman
        posPacman = gameState.getPacmanPosition()
        walls = gameState.getWalls()
        livingGhosts = gameState.getLivingGhosts()
        #move NORTH
        if Directions.NORTH in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0], posPacman[1] + 1
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for g in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance to this ghost is smaller than the stored minimum, overwrite it
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distNorth:
                            distNorth = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move SOUTH
        if Directions.SOUTH in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0], posPacman[1] - 1
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for g in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance to this ghost is smaller than the stored minimum, overwrite it
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distSouth:
                            distSouth = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move EAST
        if Directions.EAST in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0] + 1, posPacman[1]
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for g in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance to this ghost is smaller than the stored minimum, overwrite it
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distEast:
                            distEast = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move WEST
        if Directions.WEST in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0] - 1, posPacman[1]
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for g in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance to this ghost is smaller than the stored minimum, overwrite it
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distWest:
                            distWest = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        return last_move

    def printLineData(self, gameState):
        # This function uses 2 global variables
        global predictN
        global prevState

        # If the output file does not exist yet, create it with createWekaFile()
        if (os.path.isfile("training_tutorial1.arff") == False):
            attributesList = [["distNorth",
                               "NUMERIC"], ["distSouth", "NUMERIC"],
                              ["distEast", "NUMERIC"], ["distWest", "NUMERIC"],
                              ["Score", "NUMERIC"], ["NextScore", "NUMERIC"],
                              ["NearestFood", "NUMERIC"],
                              ["lastMove", "{North,South,East,West,Stop}"]]
            self.createWekaFile(attributesList)

        # We need the distancer to compute maze distances
        self.distancer = Distancer(gameState.data.layout, False)

        # Once turn N has been reached, write to the file
        if self.countActions > predictN:
            # Each time we pop 9 elements from the list (8 attributes + "\n")
            counter = 9
            # Open the output file
            file = open("training_tutorial1.arff", "a")
            # Use the current score as the value to be predicted (NextScore)
            prevState[5] = gameState.getScore()
            while (counter > 0):
                # Pop the list elements one by one and write them to the output file
                x = prevState.pop(0)
                file.write("%s" % (x))
                # Write commas between attributes except between the last two (last_move and "\n")
                if counter > 2:
                    file.write(",")
                counter -= 1
            # Close the output file
            file.close()
        # Append the distance variables to the list
        prevState.append(distNorth)
        prevState.append(distSouth)
        prevState.append(distEast)
        prevState.append(distWest)
        # Append the current score, and a placeholder for NextScore
        prevState.append(gameState.getScore())
        prevState.append(gameState.getScore() - 1)
        # Append the distance to the nearest food dot, or 99999 if there is none
        if (gameState.getDistanceNearestFood() == None):
            prevState.append(99999)
        else:
            prevState.append(gameState.getDistanceNearestFood())
        # Append the move that was taken, captured during chooseAction()
        prevState.append(last_move)
        prevState.append("\n")

    def createWekaFile(self, attributesList):
        # Open the file in append mode so an existing file is not overwritten
        file = open("training_tutorial1.arff", "a")
        # Write the file header
        file.write("@RELATION 'training_tutorial1'\n\n")
        # Write every attribute
        for l in attributesList:
            file.write("@ATTRIBUTE %s %s\n" % (l[0], l[1]))
        # Write the marker that the data section begins
        file.write("\n@data\n")
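
For reference, createWekaFile above writes a plain ARFF header built from attributesList. A self-contained sketch that reconstructs the same header into a string (instead of appending it to training_tutorial1.arff):

attributesList = [["distNorth", "NUMERIC"], ["distSouth", "NUMERIC"],
                  ["distEast", "NUMERIC"], ["distWest", "NUMERIC"],
                  ["Score", "NUMERIC"], ["NextScore", "NUMERIC"],
                  ["NearestFood", "NUMERIC"],
                  ["lastMove", "{North,South,East,West,Stop}"]]
header = "@RELATION 'training_tutorial1'\n\n"
for name, kind in attributesList:
    header += "@ATTRIBUTE %s %s\n" % (name, kind)
header += "\n@data\n"
assert header.splitlines()[2] == "@ATTRIBUTE distNorth NUMERIC"
# printLineData then appends one comma-separated row per recorded turn in the
# same attribute order, e.g. "3,5,99999,2,120,121,4,North".
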
Exemple #37
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        closestDist = 999999
        count = 0
        positions = []
        prob = 0

        for ghost in livingGhostPositionDistributions:
            positions.append((0, 0))
            for elem in ghost:
                if ghost[elem] > prob:
                    prob = ghost[elem]
                    positions[count] = elem
            count += 1
            prob = 0

        for p in positions:
            dist = self.distancer.getDistance(pacmanPosition, p)
            if dist < closestDist:
                closestDist = dist
                closestGhost = p

        # Return the action whose successor is closest to that ghost; falling
        # back to the first legal action guarantees a move is always returned.
        bestAction = legal[0]
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            newDist = self.distancer.getDistance(successorPosition,
                                                 closestGhost)
            if newDist < closestDist:
                closestDist = newDist
                bestAction = action
        return bestAction
Exemple #38
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"

        #Find most likely locations for each ghost
        most_likely_locs = []
        for dist in livingGhostPositionDistributions:
            max_prob = 0
            best_loc = 0
            for loc, prob in dist.iteritems():
                if (prob > max_prob):
                    max_prob = prob
                    best_loc = loc
            most_likely_locs.append(best_loc)

        closest_loc = most_likely_locs[0]
        closest_dist = 10000000000
        # Find closest likely loc
        for loc in most_likely_locs:
            loc_dist = self.distancer.getDistance(pacmanPosition, loc)
            if (loc_dist < closest_dist):
                closest_dist = loc_dist
                closest_loc = loc

        # Move closer to closest ghost
        best_act = 0
        closest_dist = 1000000000
        for a in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, a)
            loc_dist = self.distancer.getDistance(closest_loc,
                                                  successorPosition)
            if (loc_dist < closest_dist):
                closest_dist = loc_dist
                best_act = a
        return best_act
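
The two sentinel-based scans above are the long-hand form of Python's min(..., key=...). A self-contained toy with made-up distances showing the same selection pattern:

# Toy stand-ins for the maze distances computed above.
dist_to_ghost = {'ghostA': 6, 'ghostB': 3}              # Pacman -> most likely position
dist_after_step = {'North': 2, 'South': 4, 'East': 3}   # successor -> closest ghost

closest_loc = min(dist_to_ghost, key=dist_to_ghost.get)
best_act = min(dist_after_step, key=dist_after_step.get)
assert (closest_loc, best_act) == ('ghostB', 'North')
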
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        #Find likely ghost positions
        ghostPos = []
        for dist in livingGhostPositionDistributions:
            maxProb = 0
            maxPos = None
            for position, probability in dist.items():
                if (probability > maxProb):
                    maxPos = position
                    maxProb = probability

            ghostPos.append(maxPos)

        #find closest ghost
        minDist = 99999
        minPos = None

        for ghost in ghostPos:
            dist = self.distancer.getDistance(pacmanPosition, ghost)
            if (dist < minDist):
                minPos = ghost
                minDist = dist
        #find the best action
        minDist = 99999
        bestAction = None
        for action in legal:
            succPos = Actions.getSuccessor(pacmanPosition, action)
            dist = self.distancer.getDistance(succPos, minPos)
            if (dist < minDist):
                minDist = dist
                bestAction = action

        return bestAction
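
The manual scans over dist.items() above recompute what util.Counter.argMax(), used by several of the other submissions in this file, already provides. A self-contained illustration with a plain dict standing in for the Counter:

# Plain-dict stand-in for a util.Counter belief distribution.
beliefs = {(3, 4): 0.2, (5, 1): 0.7, (2, 2): 0.1}
most_likely = max(beliefs, key=beliefs.get)   # what Counter.argMax() returns
assert most_likely == (5, 1)
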
Exemple #40
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        closestGhostDistance = float("inf")
        ghostClosePos = None

        counterForMinDistanceFromPacToGhost = dict()

        for eachDistribution in livingGhostPositionDistributions:
            ghostPosWithMaxProb = eachDistribution.argMax()
            counterForMinDistanceFromPacToGhost[
                ghostPosWithMaxProb] = self.distancer.getDistance(
                    pacmanPosition, ghostPosWithMaxProb)

        ghostClosePos = min(counterForMinDistanceFromPacToGhost,
                            key=counterForMinDistanceFromPacToGhost.get)
        closestGhostDistance = counterForMinDistanceFromPacToGhost[
            ghostClosePos]
        move = None
        for action in legal:
            distanceTosuccessor = self.distancer.getDistance(
                Actions.getSuccessor(pacmanPosition, action), ghostClosePos)
            if distanceTosuccessor < closestGhostDistance:
                closestGhostDistance = distanceTosuccessor
                move = action

        return move
Exemple #41
class BasicAgentAA(BustersAgent):
    def registerInitialState(self, gameState):
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)
        self.countActions = 0

    ''' Example of counting something'''

    def countFood(self, gameState):
        food = 0
        # The food grid holds booleans; count the True cells.
        for column in gameState.data.food:
            for cell in column:
                if cell == True:
                    food = food + 1
        return food

    ''' Print the layout'''

    def printGrid(self, gameState):
        table = ""
        #print(gameState.data.layout) ## Print by terminal
        for x in range(gameState.data.layout.width):
            for y in range(gameState.data.layout.height):
                food, walls = gameState.data.food, gameState.data.layout.walls
                table = table + gameState.data._foodWallStr(
                    food[x][y], walls[x][y]) + ","
        table = table[:-1]
        return table

    def printInfo(self, gameState):
        print "---------------- TICK ", self.countActions, " --------------------------"
        # Map dimensions
        width, height = gameState.data.layout.width, gameState.data.layout.height
        print "Width: ", width, " Height: ", height
        # Pacman position
        print "Pacman position: ", gameState.getPacmanPosition()
        # Legal actions for Pacman in the current position
        print "Legal actions: ", gameState.getLegalPacmanActions()
        # Pacman direction
        print "Pacman direction: ", gameState.data.agentStates[0].getDirection(
        )
        # Number of ghosts
        print "Number of ghosts: ", gameState.getNumAgents() - 1
        # Ghosts that are alive (index 0 of the returned array corresponds to Pacman and is always False)
        print "Living ghosts: ", gameState.getLivingGhosts()
        # Ghost positions
        print "Ghosts positions: ", gameState.getGhostPositions()
        # Ghost directions
        print "Ghosts directions: ", [
            gameState.getGhostDirections().get(i)
            for i in range(0,
                           gameState.getNumAgents() - 1)
        ]
        # Manhattan distances to the ghosts
        print "Ghosts distances: ", gameState.data.ghostDistances
        # Remaining food dots
        print "Pac dots: ", gameState.getNumFood()
        # Manhattan distance to the nearest food dot
        print "Distance nearest pac dots: ", gameState.getDistanceNearestFood()
        # Map walls
        print "Map:  \n", gameState.getWalls()
        # Score
        print "Score: ", gameState.getScore()

    def chooseAction(self, gameState):

        # This function uses 5 global variables: one for the action and 4 for the distances
        global last_move
        global distWest
        global distEast
        global distNorth
        global distSouth
        '''
        # Increment the turn counter
        self.countActions = self.countActions + 1
        # Print relevant information about the game
        self.printInfo(gameState)
        # By default, the move to execute is "Stop"
        move = Directions.STOP
        # Initialize the distances to a very large number
        distWest = 99999
        distEast = 99999
        distNorth = 99999
        distSouth = 99999

        # Store a few relevant pieces of game data in variables
        legal = gameState.getLegalActions(0) ## legal actions for Pacman
        posPacman = gameState.getPacmanPosition()
        minDist = 99999
        walls = gameState.getWalls()
        livingGhosts = gameState.getLivingGhosts()
        #move NORTH
        if Directions.NORTH in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0], posPacman[1] + 1
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for x in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance is less than or equal to the one previously stored for this move, overwrite it
                        if self.distancer.getDistance(x, buffPacman) <= distNorth:
                            distNorth = self.distancer.getDistance(x, buffPacman)
                            # Check whether this distance is smaller than the minimum over all actions
                            if distNorth < minDist:
                                # Overwrite it and change the move to execute
                                minDist = distNorth
                                move = Directions.NORTH
                            # If the distance equals the current minimum, check whether the new square contains food;
                            # if so, change the move to execute.
                            # This way, when two actions are equally good, the one with food is chosen
                            elif distNorth == minDist:
                                if gameState.hasFood(buffPacman[0],buffPacman[1]):
                                    move = Directions.NORTH
                    iterator = iterator + 1
        #move SOUTH
        if Directions.SOUTH in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0], posPacman[1] - 1
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for x in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance is less than or equal to the one previously stored for this move, overwrite it
                        if self.distancer.getDistance(x, buffPacman) <= distSouth:
                            distSouth = self.distancer.getDistance(x, buffPacman)
                            # Check whether this distance is smaller than the minimum over all actions
                            if distSouth < minDist:
                                # Overwrite it and change the move to execute
                                minDist = distSouth
                                move = Directions.SOUTH
                            # If the distance equals the current minimum, check whether the new square contains food;
                            # if so, change the move to execute.
                            # This way, when two actions are equally good, the one with food is chosen
                            elif distSouth == minDist:
                                if gameState.hasFood(buffPacman[0],buffPacman[1]):
                                    move = Directions.SOUTH
                    iterator = iterator + 1
        #move EAST
        if Directions.EAST in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0] + 1, posPacman[1]
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for x in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance is less than or equal to the one previously stored for this move, overwrite it
                        if self.distancer.getDistance(x, buffPacman) <= distEast:
                            distEast = self.distancer.getDistance(x, buffPacman)
                            # Check whether this distance is smaller than the minimum over all actions
                            if distEast < minDist:
                                # Overwrite it and change the move to execute
                                minDist = distEast
                                move = Directions.EAST
                            # If the distance equals the current minimum, check whether the new square contains food;
                            # if so, change the move to execute.
                            # This way, when two actions are equally good, the one with food is chosen
                            elif distEast == minDist:
                                if gameState.hasFood(buffPacman[0],buffPacman[1]):
                                    move = Directions.EAST
                    iterator = iterator + 1
        #move WEST
        if Directions.WEST in legal:
            # Start the counter at 1 so that Pacman is not counted
            iterator = 1
            # Store the Pacman position that results from taking the action
            buffPacman = posPacman[0] - 1, posPacman[1]
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for x in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If the distance is less than or equal to the one previously stored for this move, overwrite it
                        if self.distancer.getDistance(x, buffPacman) <= distWest:
                            distWest = self.distancer.getDistance(x, buffPacman)
                            # Check whether this distance is smaller than the minimum over all actions
                            if distWest < minDist:
                                # Overwrite it and change the move to execute
                                minDist = distWest
                                move = Directions.WEST
                            # If the distance equals the current minimum, check whether the new square contains food;
                            # if so, change the move to execute.
                            # This way, when two actions are equally good, the one with food is chosen
                            elif distWest == minDist:
                                if gameState.hasFood(buffPacman[0],buffPacman[1]):
                                    move = Directions.WEST
                    iterator = iterator + 1
        # Store last_move so it can be used in other functions
        last_move = move
        return move
        '''
        # Inicializamos la lista que le pasaremos a Weka
        x = []
        # Inicializamos las variables de distancia a un numero muy alto, que significa que la accion es ilegal
        distWest = 99999
        distEast = 99999
        distNorth = 99999
        distSouth = 99999

        # Almacenamos en variables una serie de datos utiles
        legal = gameState.getLegalActions(0)  ##Legal position from the pacman
        posPacman = gameState.getPacmanPosition()
        walls = gameState.getWalls()
        livingGhosts = gameState.getLivingGhosts()
        #move NORTH
        if Directions.NORTH in legal:
            # Inicia un contador a 1 para no tener el cuenta el pacman
            iterator = 1
            # Almacena la posicion del pacman resultante de ejecutar la accion
            buffPacman = posPacman[0], posPacman[1] + 1
            # Comprueba que la casilla objetivo no contenga un muro
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Itera sobre los fantasmas
                for g in gameState.getGhostPositions():
                    # Comprueba que los fantasmas estan vivos
                    if livingGhosts[iterator] == True:
                        # Si la distancia minima actual es menor que la almacenada, se sobreescribe
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distNorth:
                            distNorth = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move SOUTH
        if Directions.SOUTH in legal:
            # Inicia un contador a 1 para no tener el cuenta el pacman
            iterator = 1
            # Almacena la posicion del pacman resultante de ejecutar la accion
            buffPacman = posPacman[0], posPacman[1] - 1
            # Comprueba que la casilla objetivo no contenga un muro
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Itera sobre los fantasmas
                for g in gameState.getGhostPositions():
                    # Comprueba que los fantasmas estan vivos
                    if livingGhosts[iterator] == True:
                        # Si la distancia minima actual es menor que la almacenada, se sobreescribe
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distSouth:
                            distSouth = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move EAST
        if Directions.EAST in legal:
            # Inicia un contador a 1 para no tener el cuenta el pacman
            iterator = 1
            # Almacena la posicion del pacman resultante de ejecutar la accion
            buffPacman = posPacman[0] + 1, posPacman[1]
            # Comprueba que la casilla objetivo no contenga un muro
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Itera sobre los fantasmas
                for g in gameState.getGhostPositions():
                    # Comprueba que los fantasmas estan vivos
                    if livingGhosts[iterator] == True:
                        # Si la distancia minima actual es menor que la almacenada, se sobreescribe
                        if self.distancer.getDistance(g,
                                                      buffPacman) < distEast:
                            distEast = self.distancer.getDistance(
                                g, buffPacman)
                    iterator += 1

        #move WEST
        if Directions.WEST in legal:
            # Start the counter at 1 so that Pacman (agent 0) is not counted
            iterator = 1
            # Store the position Pacman would reach after taking the action
            buffPacman = posPacman[0] - 1, posPacman[1]
            # Check that the target square does not contain a wall
            if walls[buffPacman[0]][buffPacman[1]] == False:
                # Iterate over the ghosts
                for g in gameState.getGhostPositions():
                    # Check that the ghost is alive
                    if livingGhosts[iterator] == True:
                        # If this ghost's distance is smaller than the stored minimum, overwrite it
                        if self.distancer.getDistance(g, buffPacman) < distWest:
                            distWest = self.distancer.getDistance(g, buffPacman)
                    iterator += 1

        # Append the distance variables to the feature list
        x.append(distNorth)
        x.append(distSouth)
        x.append(distEast)
        x.append(distWest)
        # -------------------------- COMMENT THIS PART IF USING NoFood_NoScore --------------------------
        # Append the score to the feature list
        x.append(gameState.getScore())
        # Append the distance to the nearest food; if there is none, append 99999 instead
        if (gameState.getDistanceNearestFood() == None):
            x.append(99999)
        else:
            x.append(gameState.getDistanceNearestFood())
        # -----------------------------------------------------------------------------------------------

        # Pass the collected data to Weka so it classifies the instance
        a = self.weka.predict(
            "./Models/Classification/Tutorial1/Samemaps/Unfiltered/LMT.model",
            x, "./Training/training_tutorial1_noNextScore.arff")

        # ------ These lines prevent the automatic agent from entering an infinite loop ----------
        # The bug occurs when the "East" action cannot be taken and the "South" action is better than "North"
        """ if (distEast == 99999 and distNorth > distSouth and a == 'North'):
            a = 'South' """
        # -----------------------------------------------------------------------------------------------------

        # ------ These lines keep the automatic keyboard agent from taking illegal actions -------
        """ if (a == 'East' and distEast == 99999):
            a = 'Stop'
        elif (a == 'West' and distWest == 99999):
            a = 'Stop'
        elif (a == 'North' and distNorth == 99999):
            a = 'Stop'
        elif (a == 'South' and distSouth == 99999):
            a = 'Stop' """
        # ------------------------------------------------------------------------------------------------------
        # Store last_move so it can be used in other functions
        last_move = a
        return a

    def printLineData(self, gameState):
        # This function uses 2 global variables
        global predictN
        global prevState

        # Try to open the output file; if it does not exist, create it using createWekaFile()
        if (os.path.isfile("training_tutorial1.arff") == False):
            attributesList = [["distNorth",
                               "NUMERIC"], ["distSouth", "NUMERIC"],
                              ["distEast", "NUMERIC"], ["distWest", "NUMERIC"],
                              ["Score", "NUMERIC"], ["NextScore", "NUMERIC"],
                              ["NearestFood", "NUMERIC"],
                              ["lastMove", "{North,South,East,West,Stop}"]]
            self.createWekaFile(attributesList)

        # If we have reached turn N, write to the file
        if self.countActions > predictN:
            # Each time we pop 9 elements from the list (8 attributes + "\n")
            counter = 9
            # Open the output file
            file = open("training_tutorial1.arff", "a")
            # Get the current score, to use it as the predicted value
            prevState[5] = gameState.getScore()
            while (counter > 0):
                # Use pop to take the list elements one by one and write them to the output file
                x = prevState.pop(0)
                file.write("%s" % (x))
                # Write commas between attributes, except between the last 2 (last_move and "\n")
                if counter > 2:
                    file.write(",")
                counter -= 1
            # Close the output file
            file.close()
        # Append the distance variables to the list
        prevState.append(distNorth)
        prevState.append(distSouth)
        prevState.append(distEast)
        prevState.append(distWest)
        # Append the current score, and a placeholder for nextScore
        prevState.append(gameState.getScore())
        prevState.append(gameState.getScore() - 1)
        # Append the distance to the nearest food; if there is none, append 99999 instead
        if (gameState.getDistanceNearestFood() == None):
            prevState.append(99999)
        else:
            prevState.append(gameState.getDistanceNearestFood())
        # Add the move that was taken, obtained during chooseAction()
        prevState.append(last_move)
        prevState.append("\n")

    def createWekaFile(self, attributesList):
        # Open the file in append mode so the existing file is not overwritten
        file = open("training_tutorial1.arff", "a")
        # Write the file header
        file.write("@RELATION 'training_tutorial1'\n\n")
        # Write every attribute
        for l in attributesList:
            file.write("@ATTRIBUTE %s %s\n" % (l[0], l[1]))
        # Write the marker indicating that the data section begins
        file.write("\n@data\n")
        # Close the output file (the other file-writing helpers above do the same)
        file.close()
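        # For reference, a sketch (not part of the original agent) of the header this method
        # would emit for the attributesList built in printLineData() above:
        #
        #   @RELATION 'training_tutorial1'
        #
        #   @ATTRIBUTE distNorth NUMERIC
        #   @ATTRIBUTE distSouth NUMERIC
        #   @ATTRIBUTE distEast NUMERIC
        #   @ATTRIBUTE distWest NUMERIC
        #   @ATTRIBUTE Score NUMERIC
        #   @ATTRIBUTE NextScore NUMERIC
        #   @ATTRIBUTE NearestFood NUMERIC
        #   @ATTRIBUTE lastMove {North,South,East,West,Stop}
        #
        #   @data
        #
        # Each row appended later by printLineData() holds the eight values in the same
        # order, e.g. (hypothetical values): 3,5,2,99999,-12,-11,4,East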
Exemple #42
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        probably = []  # most likely position of each living ghost
        for pos in livingGhostPositionDistributions:
            probably.append(pos.argMax())

        action = util.Counter()  # stores the smallest distance after taking each action
        # initialization
        for pos in legal:
            action[pos] = 98765432109876543210  # infinity
        for acao in action.keys():
            successorPosition = Actions.getSuccessor(pacmanPosition, acao)  # position after the action
            for p in probably:
                mazeDist = self.distancer.getDistance(successorPosition, p)
                if mazeDist < action[acao]:  # check whether it is smaller than the current minimum
                    action[acao] = mazeDist

        # negate the values so that argMax() picks the smallest distance
        for acao in action.keys():
            action[acao] = action[acao] * -1

        return action.argMax()
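        # Equivalent sketch (an alternative, not the original author's code): the
        # negate-then-argMax trick above can also be written directly as a min over
        # the legal actions:
        #   return min(legal, key=lambda a: min(
        #       self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, a), p)
        #       for p in probably))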
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        pacmanLegalAction = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"

        # Chase the ghost nearest to pacman
        # return action = argmin(action.distance)

        minDistance = float("inf")
        bestAction = None

        for action in pacmanLegalAction:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)

            for ghostPositionDistribution in livingGhostPositionDistributions:
                # Guess the ghostposition = argmax(Prob[position])
                ghostPosition = ghostPositionDistribution.sortedKeys()[0]
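                # Note: sortedKeys() on a util.Counter returns keys ordered by value from
                # highest to lowest, so index 0 is the most probable position (equivalent
                # to ghostPositionDistribution.argMax()).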
                distance = self.distancer.getDistance(successorPosition,
                                                      ghostPosition)

                if distance < minDistance:
                    minDistance = distance
                    bestAction = action

        return bestAction
class P3QLearning(BustersAgent):
    "An agent that charges the closest ghost."

    def __init__(self, index=0, inference="ExactInference", ghostAgents=None):
        BustersAgent.__init__(self, index, inference, ghostAgents)
        self.q_table = self.initQTable()
        self.epsilon = 0.3
        self.alpha = 0.8
        self.discount = 0.8
        self.actions = [
            Directions.NORTH, Directions.WEST, Directions.SOUTH,
            Directions.EAST
        ]
        self.lastState = None
        self.lastAction = None
        self.numGhosts = 4
        self.lastDistance = 100
        self.turns = 0
        self.reward = 0

        # there will be one value in q_table for each (state, action) pair.

    def registerInitialState(self, gameState):
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def getAction(self, gameState):
        return self.chooseAction(gameState)

    def getState(self, gameState):
        self.reward = 0
        state = ""
        ghostDist = []
        for i in range(len(gameState.livingGhosts)):
            if gameState.livingGhosts[i] is True:
                ghostDist.append(gameState.getGhostPosition(i))

        pacmanPosition = gameState.getPacmanPosition()
        dists = []
        for i in ghostDist:
            dists.append(self.distancer.getDistance(pacmanPosition, i))

        #get the index of the nearest ghost
        index = dists.index(min(dists))

        if min(dists) < self.lastDistance:
            self.reward = 5

        #get the vector between pacman and the nearest ghost
        vec = (pacmanPosition[0] - ghostDist[index][0],
               pacmanPosition[1] - ghostDist[index][1])
        if vec[0] > 0:
            if vec[1] > 0:
                #print "down left",
                if abs(vec[0]) > abs(vec[1]):
                    state += Directions.WEST
                else:
                    state += Directions.SOUTH
            else:
                #print "up left",
                if abs(vec[0]) > abs(vec[1]):
                    state += Directions.WEST
                else:
                    state += Directions.NORTH
        else:
            if vec[1] > 0:
                #print "down right",
                if abs(vec[0]) > abs(vec[1]):
                    state += Directions.EAST
                else:
                    state += Directions.SOUTH
            else:
                #print "up right",
                if abs(vec[0]) > abs(vec[1]):
                    state += Directions.EAST
                else:
                    state += Directions.NORTH

        state += ","

        state +=\
        str(gameState.hasWall(gameState.getPacmanPosition()[0], gameState.getPacmanPosition()[1] + 1)) + "," +\
        str(gameState.hasWall(gameState.getPacmanPosition()[0] - 1, gameState.getPacmanPosition()[1])) + "," +\
        str(gameState.hasWall(gameState.getPacmanPosition()[0], gameState.getPacmanPosition()[1] - 1)) + "," +\
        str(gameState.hasWall(gameState.getPacmanPosition()[0] + 1, gameState.getPacmanPosition()[1]))

        return state

    def shouldExit(self):
        return self.turns >= 800

    def chooseAction(self, gameState):
        #if the number of turns is bigger than a constant
        #if self.shouldExit():
        #    sys.exit(0)

        #get the current state
        state = self.getState(gameState)

        #get the action
        legalActions = self.getLegalActions(state)
        action = None
        if util.flipCoin(self.epsilon):
            action = self.getPolicy(state)
        else:
            action = random.choice(legalActions)

        #update the table
        if self.lastState != None and self.lastAction != None:
            #if a ghost has been eaten
            if sum(gameState.livingGhosts) < self.numGhosts:
                # update the stored ghost count so the capture reward is only given once
                self.numGhosts = sum(gameState.livingGhosts)
                self.reward = 100
            self.update(self.lastState, self.lastAction, state, self.reward)

        #update values
        self.lastState = state
        self.lastAction = action
        self.turns += 1
        return action

    def getPolicy(self, state):
        return self.computeActionFromQValues(state)

    def getLegalActions(self, state):
        legalActions = []
        state = state.split(",")[1:]
        for i, s in enumerate(state):
            if s == "False":
                legalActions.append(self.actions[i])
        return legalActions
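        # Example (hypothetical state string): for "North,False,True,False,False" the wall
        # flags are read in the order [NORTH, WEST, SOUTH, EAST] (matching self.actions and
        # the order built in getState), so this returns [NORTH, SOUTH, EAST]; WEST is
        # blocked by a wall.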

    def getValue(self, state):
        return self.computeValueFromQValues(state)

    def computeValueFromQValues(self, state):
        """
          Returns max_action Q(state,action)
          where the max is over legal actions.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return a value of 0.0.
        """
        legalActions = self.getLegalActions(state)
        if len(legalActions) == 0:
            return 0.0
        tmp = []
        for action in legalActions:
            tmp.append(self.computeQValueFromValues(state, action))
        return max(tmp)

    def computeActionFromQValues(self, state):
        """
          Compute the best action to take in a state.  Note that if there
          are no legal actions, which is the case at the terminal state,
          you should return None.
        """
        legalActions = self.getLegalActions(state)
        if len(legalActions) == 0:
            return None
        tmp = util.Counter()
        for action in legalActions:
            tmp[action] = self.computeQValueFromValues(state, action)
        return tmp.argMax()

    def computeQValueFromValues(self, state, action):
        """
          Returns Q(state,action)
          Should return 0.0 if we have never seen a state
          or the Q node value otherwise
        """
        "*** YOUR CODE HERE ***"
        return self.q_table[(state, action)]

    def initQTable(self):
        table_file = open("qtable.txt", "r")
        table_file.seek(0)
        table = table_file.readlines()
        qvalues = []
        for i, line in enumerate(table):
            qvalues.append(line)
        table_file.close()

        q_table = util.Counter()
        dirs = [
            Directions.NORTH, Directions.SOUTH, Directions.EAST,
            Directions.WEST
        ]
        walls = ["True", "False"]
        actions = [
            Directions.NORTH, Directions.SOUTH, Directions.EAST,
            Directions.WEST
        ]
        i = 0
        for direction in dirs:
            for wall1 in walls:
                for wall2 in walls:
                    for wall3 in walls:
                        for wall4 in walls:
                            for action in actions:
                                state = direction + "," + wall1 + "," + wall2 + "," + wall3 + "," + wall4
                                q_table[(state, action)] = float(qvalues[i])
                                i += 1
        return q_table
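        # Expected layout of qtable.txt (an assumption based on the loops above): one float
        # per line, in nested-loop order, for
        #   4 directions * 2^4 wall combinations * 4 actions = 256 lines.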

    def update(self, state, action, nextState, reward):
        self.q_table[(state,action)] = (1 - self.alpha) * self.q_table[(state, action)] +\
            self.alpha * (reward + self.discount*self.getValue(nextState))
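        # Small worked example of the rule above, using the constructor defaults
        # alpha = 0.8 and discount = 0.8: with Q(state, action) = 0, reward = 5 and
        # getValue(nextState) = 10, the new value is
        #   (1 - 0.8) * 0 + 0.8 * (5 + 0.8 * 10) = 0.8 * 13 = 10.4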

    def writeQtable(self):
        table_file = open("qtable.txt", "w+")
        for key in self.q_table:
            table_file.write(str(self.q_table[key]) + "\n")
        table_file.close()
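        # Caveat (an observation, not in the original code): self.q_table is a util.Counter
        # (a dict), so the iteration order here is not guaranteed to match the nested-loop
        # order that initQTable() expects when reading the file back in.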

    def __del__(self):
        self.writeQtable()
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        #util.raiseNotDefined()
        mindistance = 10e4
        nextactiontochose = Directions.STOP
        Total_number_of_ghosts = 0
        for i in range(1, len(livingGhosts)):
            if (livingGhosts[i]) == True:
                Total_number_of_ghosts = Total_number_of_ghosts + 1

        finalghostpos = 0
        probofghost = 0
        for action in legal:
            npos = Actions.getSuccessor(pacmanPosition, action)
            for j in range(Total_number_of_ghosts):
                currentdistributionofghost = livingGhostPositionDistributions[
                    j]
                for ghostposition in currentdistributionofghost.keys():
                    if currentdistributionofghost[ghostposition] > probofghost:
                        finalghostpos = ghostposition
                        probofghost = currentdistributionofghost[ghostposition]
            ghostdistance = self.distancer.getDistance(npos, finalghostpos)
            if ghostdistance < mindistance:
                mindistance = ghostdistance
                nextactiontochose = action
        return nextactiontochose
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
    First computes the most likely position of each ghost that
    has not yet been captured, then chooses an action that brings
    Pacman closer to the closest ghost (in maze distance!).

    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)

    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)

    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.

    You may remove Directions.STOP from the list of available actions.
    """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [
            a for a in gameState.getLegalPacmanActions()
            if a != Directions.STOP
        ]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        closestGhostDistance = float("inf")
        closestGhostPosition = None

        for distribution in livingGhostPositionDistributions:
            probability = 0
            maxProbPos = None
            for d in distribution:
                if distribution[d] > probability:
                    probability = distribution[d]
                    maxProbPos = d
            currDist = self.distancer.getDistance(pacmanPosition, maxProbPos)
            if currDist < closestGhostDistance:
                closestGhostDistance = currDist
                closestGhostPosition = maxProbPos

        "Now that we have the probabilistic closest ghost position, we need to pick a move"
        "that minimizes the gap"
        minDist = closestGhostDistance
        bestMove = None
        "this loop takes a random best move (in case multiple happen to close the distance equally)"
        for action in legal:
            succPos = Actions.getSuccessor(pacmanPosition, action)
            succDist = self.distancer.getDistance(succPos,
                                                  closestGhostPosition)
            if succDist < minDist:
                minDist = succDist
                bestMove = action
            elif succDist == minDist:
                # avoid keeping None once an equally good move has been found
                bestMove = action if bestMove is None else random.choice([bestMove, action])

        return bestMove
class GreedyBustersAgent(BustersAgent):
  "An agent that charges the closest ghost."
  
  def registerInitialState(self, gameState):
    "Pre-computes the distance between every two points."
    BustersAgent.registerInitialState(self, gameState)
    self.distancer = Distancer(gameState.data.layout, False)
    
  def chooseAction(self, gameState):
    """
    First computes the most likely position of each ghost that 
    has not yet been captured, then chooses an action that brings 
    Pacman closer to the closest ghost (in maze distance!).
    
    To find the maze distance between any two positions, use:
    self.distancer.getDistance(pos1, pos2)
    
    To find the successor position of a position after an action:
    successorPosition = Actions.getSuccessor(position, action)
    
    livingGhostPositionDistributions, defined below, is a list of
    util.Counter objects equal to the position belief distributions
    for each of the ghosts that are still alive.  It is defined based
    on (these are implementation details about which you need not be
    concerned):

      1) gameState.getLivingGhosts(), a list of booleans, one for each
         agent, indicating whether or not the agent is alive.  Note
         that pacman is always agent 0, so the ghosts are agents 1,
         onwards (just as before).

      2) self.ghostBeliefs, the list of belief distributions for each
         of the ghosts (including ghosts that are not alive).  The
         indices into this list should be 1 less than indices into the
         gameState.getLivingGhosts() list.
     
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions()]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i,beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]

    maxPosList = util.Counter()
    for posList in livingGhostPositionDistributions:
        maxProb = 0
        for pos, prob in posList.items():
            if prob > maxProb:
                maxProb = prob
                maxPos = pos
        maxPosList[maxPos] = self.distancer.getDistance(maxPos, pacmanPosition)
    
    # sortedKeys() orders positions by stored distance (descending), so the last key
    # is the most likely ghost position that is closest to Pacman
    minPos = maxPosList.sortedKeys()[len(maxPosList) - 1]
    currDistance = self.distancer.getDistance(pacmanPosition, minPos)
    for action in legal:
        nextPacPos = Actions.getSuccessor(pacmanPosition, action)
        nextDistance = self.distancer.getDistance(nextPacPos, minPos)
        #print "currDistance", currDistance, "nextDistance", nextDistance
        if nextDistance < currDistance:
            return action
    
    for action in legal:
        if action == "Stop":
            continue
        nextPacPos = Actions.getSuccessor(pacmanPosition, action)
        nextDistance = self.distancer.getDistance(nextPacPos, minPos)
        #print "currDistance", currDistance, "nextDistance", nextDistance
        if nextDistance == currDistance:
            return action
        
    return legal[0]
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"

        maxPosList = []

        minDist = float("inf")
        minDistIndex = None
        for beliefs in livingGhostPositionDistributions:
            maxPosList.append(beliefs.argMax())
        for maxPos in range(len(maxPosList)):
            minDistance = self.distancer.getDistance(maxPosList[maxPos],
                                                     pacmanPosition)
            if minDistIndex is None or minDistance <= minDist:
                minDistIndex = maxPos
                minDist = minDistance

        minPos = maxPosList[minDistIndex]
        newMinDist = float("inf")
        bestAction = None
        for a in (legal):
            newPos = Actions.getSuccessor(pacmanPosition, a)
            mazeDistance = self.distancer.getDistance(minPos, newPos)
            if bestAction is None or mazeDistance <= newMinDist:
                bestAction = a
                newMinDist = mazeDistance

        return bestAction
Exemple #49
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        print(pacmanPosition)
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        print(legal, livingGhosts)
        livingGhostPositionDistributions = self.ghostBeliefs
        "*** YOUR CODE HERE ***"
        # distance + probability*lambda

        total = 0.0
        maxList = []
        for i in range(len(livingGhostPositionDistributions)):
            print(i, livingGhostPositionDistributions[i], '\n')
            argmax = 0.0
            maxPo = pacmanPosition
            for point in livingGhostPositionDistributions[i]:
                prb = livingGhostPositionDistributions[i][point]
                if prb > argmax:
                    argmax = prb
                    maxPo = point
            if livingGhosts[i + 1]:
                maxList.append(maxPo)
        print(pacmanPosition, maxList)
        maxMove = 10000
        choice = legal[0]
        for move in legal:
            newPos = Actions.getSuccessor(pacmanPosition, move)
            for dis in maxList:
                dist = self.distancer.getDistance(dis, newPos)
                if dist < maxMove:
                    maxMove = dist
                    choice = move
        return choice
Exemple #50
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        '''What we will end up returning as our best "Greedy" action'''
        actionWithMinimalDistance = 99999
        actionTaken = None

        for action in legal:
            '''Everywhere we can go from our current position'''

            newPosition = Actions.getSuccessor(pacmanPosition, action)

            ghostPosition = (-1, -1)
            ghostProbability = -1
            '''Loop through all the ghosts'''
            for ghost in livingGhostPositionDistributions:
                '''Loop through the probabilities for each ghost'''
                for posProb in ghost:
                    if (ghost[posProb] > ghostProbability):
                        ghostProbability = ghost[posProb]
                        ghostPosition = posProb
            '''
            OK, so now we have the ghost whose position we believe we know with the highest likelihood.
            This is where we need to:
                - Compute the distance from ourselves to the position we found, then update
                  the best action if applicable
            '''
            distFromSuccessorToGhost = self.distancer.getDistance(
                newPosition, ghostPosition)
            if (distFromSuccessorToGhost < actionWithMinimalDistance):
                actionWithMinimalDistance = distFromSuccessorToGhost
                actionTaken = action

        return actionTaken
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i + 1]]
        maxTemp = []
        index = 0
        while index < len(livingGhostPositionDistributions):
            maxTemp.append(livingGhostPositionDistributions[index].argMax())
            index += 1
        goal, probGoal = None, 0

        i = 0
        while i < len(maxTemp):
            if livingGhostPositionDistributions[i][maxTemp[i]] >= probGoal:
                goal = maxTemp[i]
                probGoal = livingGhostPositionDistributions[i][maxTemp[i]]
            i += 1

        j = 0
        temp = []
        while j < len(legal):
            temp.append((self.distancer.getDistance(
                Actions.getSuccessor(pacmanPosition, legal[j]),
                goal), legal[j]))
            j += 1
        return min(temp)[1]
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """

        # Renamed variables
        pacman_pos = gameState.getPacmanPosition()
        legal_actions = [a for a in gameState.getLegalPacmanActions()]
        live_ghosts = gameState.getLivingGhosts()

        living_ghost_pos_dists = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if live_ghosts[i + 1]
        ]

        "*** YOUR CODE HERE ***"

        # Map ghosts to most likely positions
        most_likely_ghost_positions = [
            dist.argMax() for dist in living_ghost_pos_dists
        ]

        # Maps moves to distances from most likely positions
        moves_to_distances = {
            move: [
                self.distancer.getDistance(
                    Actions.getSuccessor(pacman_pos, move), most_likely_pos)
                for most_likely_pos in most_likely_ghost_positions
            ]
            for move in legal_actions
        }

        # Pick the move with the smallest minimal distance
        return min(moves_to_distances.keys(),
                   key=lambda move: min(moves_to_distances[move]))
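        # Design note: min(min(...)) picks the action whose successor square is closest to
        # the nearest likely ghost position; ties between equally good actions are broken
        # by the dictionary's iteration order.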
class ClusterAgent (BustersAgent):


    def registerInitialState(self, gameState):
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

        # Whether the distance attribute is used (True for v1 and v2, False for v3)
        self.dis = True

        # Used to compute the class values for the policies.
        self.clusters = 8
        self.classes = 4
        self.classCounts = [[0 for i in range(self.classes)] for j in range(self.clusters)]

        self.classIndex = 2
        self.clusterIndex = 3

        self.readInstances()

        # This will be used to store the training instances.
        self.numInstances = 52
        self.numAttributes = 4
        #self.instances = [[" " for i in range(self.numAttributes)] for j in range(self.numInstances)]
        self.ins = [" " for i in range(self.numInstances)]

        # To use the library we must start the Java virtual machine (JVM)
        jvm.start()

        # Create the model
        loader = Loader(classname="weka.core.converters.ArffLoader")
        data = loader.load_file("/home/dot/Escritorio/Universidad/Machine Learning/practica 2/Outputs/agent_header.arff")

        self.clusterer = Clusterer(classname="weka.clusterers.SimpleKMeans", options=["-N", str(self.clusters)])
        self.clusterer.build_clusterer(data)

        print(self.clusterer)

        # Apply the policy
        self.politicaMax()


    def readInstances(self):

        # Path to the agent file (instances without a header).
        path = os.getcwd() + "/Outputs/agent.arff"

        f = open(path, 'r')

        index = 0

        # Read each instance
        for line in f:

            # Get the attribute values (as strings)
            values = line.split(",")

            # Get the class value, from North to West (0 - 3)
            classValue = 0
            classAtt = values[self.classIndex]
            if (classAtt == "East"):
                classValue = 1
            elif (classAtt == "South"):
                classValue = 2
            elif (classAtt == "West"):
                classValue = 3

            # Get the cluster value.
            cluster = values[self.clusterIndex]

            # Increment the class count for that cluster.
            self.classCounts[int(cluster[-2:]) - 1][classValue] += 1

        f.close()

    # Compute the majority class for each cluster
    def politicaMax(self):

        self.max = [0 for i in range(self.clusters)]

        for i in range(self.clusters):

            temp_max = 0
            class_index = 0

            for j in range(self.classes):

                if (self.classCounts[i][j] > temp_max):

                    temp_max = self.classCounts[i][j]
                    class_index = j

            self.max[i] = class_index
            #print(class_index)

        '''
        for i in range(self.clusters):
            print(self.max[i])
        '''

    def chooseAction(self, gameState):

        path = os.getcwd() + "/Outputs/newInstance.arff"

        f = open(path, 'w')

        if (self.dis):
            data = "@RELATION pacman\n" \
                    + "@ATTRIBUTE dis NUMERIC\n" \
                    + "@ATTRIBUTE relPos {-1,0,1,2,3,4,5,6,7,8}\n\n" \
                    + "@DATA\n"
        else:
            data = "@RELATION pacman\n" \
                   + "@ATTRIBUTE relPos {-1,0,1,2,3,4,5,6,7,8}\n\n" \
                   + "@DATA\n"


        # Get Pacman's position (x, y)
        pos_pac = gameState.data.agentStates[0].getPosition()


        # Get the distances to the ghosts
        for i in range(1, gameState.getNumAgents()):

            # Compute the real (maze) distance to ghost i
            pos_ghost = gameState.data.agentStates[i].getPosition()

            distance = self.distancer.getDistance(pos_pac, pos_ghost)

            # Normalization: (distance - min)/(max - min): min = 1, max = 21
            distance = (distance - 1) / (21 - 1)

            # If the distance is greater than 1000 it means that this ghost has already been eaten
            if (self.dis):
                if (distance > 1000):
                    data = data + ("-1,")
                else:
                    data = data + str(distance) + ","


        # Get the relative positions of the ghosts with respect to Pacman
        for i in range(1, gameState.getNumAgents()):

            pos_ghost = gameState.data.agentStates[i].getPosition()

            if (pos_ghost[1] < 3):
                data = data + "-1,"
                continue

            # If the ghost is at the same position as Pacman, mark it as 0
            if (pos_ghost == pos_pac):
                data = data + "0,"
                continue

            # Determine the relative positions
            # {NORTH = 1, NORTH_EAST = 2, EAST = 3, SOUTH_EAST = 4, SOUTH = 5, SOUTH_WEST = 6, WEST = 7, NORTH_WEST = 8}.
            if (pos_ghost[0] > pos_pac[0]):
                if (pos_ghost[1] > pos_pac[1]):
                    data = data + "2,"
                elif (pos_ghost[1] < pos_pac[1]):
                    data = data + "4,"
                else:
                    data = data + "3,"
            elif (pos_ghost[0] < pos_pac[0]):
                if (pos_ghost[1] > pos_pac[1]):
                    data = data + "8,"
                elif (pos_ghost[1] < pos_pac[1]):
                    data = data + "6,"
                else:
                    data = data + "7,"
            else:
                if (pos_ghost[1] > pos_pac[1]):
                    data = data + "1,"
                else:
                    data = data + "5,"

        data = data + "\n"

        #print(data)

        f.write(data)

        f.close()

        loader = Loader(classname="weka.core.converters.ArffLoader")
        newData = loader.load_file("/home/dot/Escritorio/Universidad/Machine Learning/practica 2/Outputs/newInstance.arff")

        dir = 4
        direction = Directions.STOP

        for inst in newData:
            cl = self.clusterer.cluster_instance(inst)
            #print(cl)
            dir = self.max[cl]
            #print(dir)


        if (dir == 0):
            direction = Directions.NORTH
        elif (dir == 1):
            direction = Directions.EAST
        elif (dir == 2):
            direction = Directions.SOUTH
        elif (dir == 3):
            direction = Directions.WEST

        #print(direction)
        return direction
Exemple #54
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        "Build a list of the positions with highest probability of having a ghost"
        closestGhostPositions = []
        for belief in livingGhostPositionDistributions:
            closestGhostPositions.append(belief.argMax())

        "Choose the position that has the highest probability of being the closest ghost"
        closestGhostPosition = None
        highestProbability = 0
        for index, position in enumerate(closestGhostPositions):
            prob = livingGhostPositionDistributions[index][position]
            if prob >= highestProbability:
                closestGhostPosition = position
                highestProbability = prob

        "Choose the action that creates the shortest distance to the closest ghost"
        possibleActions = []
        for action in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, action)
            distance = self.distancer.getDistance(successorPosition,
                                                  closestGhostPosition)
            possibleActions.append((distance, action))

        return min(possibleActions)[1]
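        # Note: min() compares the (distance, action) tuples lexicographically, so ties in
        # distance are broken alphabetically by the action name.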
    def registerInitialState(self, gameState):
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)
        self.countActions = 0

        # Get the path to the output file

        #SameMaps -----------------------------------------------
        path = os.getcwd() + "/Outputs/training_initial_v3_1A.arff"

        # Open the file
        f = open(path, 'a')

        statInfo = os.stat(path)
        
        if (statInfo.st_size == 0):
            s = "@RELATION pacman\n\n" \
                + "@ATTRIBUTE pacx numeric\n" \
                + "@ATTRIBUTE pacy numeric\n" \
                + "@ATTRIBUTE legal_north {true, false}\n" \
                + "@ATTRIBUTE legal_east {true, false}\n" \
                + "@ATTRIBUTE legal_south {true, false}\n" \
                + "@ATTRIBUTE legal_west {true, false}\n" \
                + "@ATTRIBUTE g1_x NUMERIC\n" \
                + "@ATTRIBUTE g1_y NUMERIC\n" \
                + "@ATTRIBUTE g2_x NUMERIC\n" \
                + "@ATTRIBUTE g2_y NUMERIC\n" \
                + "@ATTRIBUTE g3_x NUMERIC\n" \
                + "@ATTRIBUTE g3_y NUMERIC\n" \
                + "@ATTRIBUTE g4_x NUMERIC\n" \
                + "@ATTRIBUTE g4_y NUMERIC\n" \
                + "@ATTRIBUTE g1_dis NUMERIC\n" \
                + "@ATTRIBUTE g2_dis NUMERIC\n" \
                + "@ATTRIBUTE g3_dis NUMERIC\n" \
                + "@ATTRIBUTE g4_dis NUMERIC\n" \
                + "@ATTRIBUTE num_walls NUMERIC\n" \
                + "@ATTRIBUTE alive_ghosts NUMERIC\n" \
                + "@ATTRIBUTE score NUMERIC\n" \
                + "@ATTRIBUTE future_score NUMERIC\n" \
                + "@ATTRIBUTE future_alive_ghosts NUMERIC\n" \
                + "@ATTRIBUTE last_action {Stop, North, East, South, West}\n" \
                + "@ATTRIBUTE g1_relPos {-1,0,1,2,3,4,5,6,7,8}\n" \
                + "@ATTRIBUTE g2_relPos {-1,0,1,2,3,4,5,6,7,8}\n" \
                + "@ATTRIBUTE g3_relPos {-1,0,1,2,3,4,5,6,7,8}\n" \
                + "@ATTRIBUTE g4_relPos {-1,0,1,2,3,4,5,6,7,8}\n" \
                + "@ATTRIBUTE g1_closest {true, false}\n" \
                + "@ATTRIBUTE g2_closest {true, false}\n" \
                + "@ATTRIBUTE g3_closest {true, false}\n" \
                + "@ATTRIBUTE g4_closest {true, false}\n" \
                + "@ATTRIBUTE north_best {true, false}\n" \
                + "@ATTRIBUTE east_best {true, false}\n" \
                + "@ATTRIBUTE south_best {true, false}\n" \
                + "@ATTRIBUTE west_best {true, false}\n" \
                + "@ATTRIBUTE action {North, East, South, West}\n\n" \
                + "@DATA\n"
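            # NOTE: the header string above is immediately overwritten by the reduced
            # (single-ghost) header below; only the second version is written to the file.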

            s = "@RELATION pacman\n\n" \
                + "@ATTRIBUTE pacx numeric\n" \
                + "@ATTRIBUTE pacy numeric\n" \
                + "@ATTRIBUTE legal_north {true, false}\n" \
                + "@ATTRIBUTE legal_east {true, false}\n" \
                + "@ATTRIBUTE legal_south {true, false}\n" \
                + "@ATTRIBUTE legal_west {true, false}\n" \
                + "@ATTRIBUTE g1_x NUMERIC\n" \
                + "@ATTRIBUTE g1_y NUMERIC\n" \
                + "@ATTRIBUTE g1_dis NUMERIC\n" \
                + "@ATTRIBUTE num_walls NUMERIC\n" \
                + "@ATTRIBUTE alive_ghosts NUMERIC\n" \
                + "@ATTRIBUTE score NUMERIC\n" \
                + "@ATTRIBUTE future_score NUMERIC\n" \
                + "@ATTRIBUTE future_alive_ghosts NUMERIC\n" \
                + "@ATTRIBUTE last_action {Stop, North, East, South, West}\n" \
                + "@ATTRIBUTE g1_relPos {-1,0,1,2,3,4,5,6,7,8}\n" \
                + "@ATTRIBUTE g1_closest {true, false}\n" \
                + "@ATTRIBUTE north_best {true, false}\n" \
                + "@ATTRIBUTE east_best {true, false}\n" \
                + "@ATTRIBUTE south_best {true, false}\n" \
                + "@ATTRIBUTE west_best {true, false}\n" \
                + "@ATTRIBUTE action {North, East, South, West}\n\n" \
                + "@DATA\n"

            f.write(s)

        f.close()
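
    # Illustrative sketch (not part of the original agent): one way a single
    # training instance could be appended to the ARFF file whose header is
    # written above. The method name and the `values` argument are hypothetical;
    # the caller is assumed to pass the attribute values in the same order as
    # the @ATTRIBUTE declarations, ending with the chosen action.
    def writeArffInstance(self, path, values):
        # ARFF @DATA rows are comma-separated values, one instance per line.
        f = open(path, 'a')
        f.write(",".join(str(v) for v in values) + "\n")
        f.close()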
Exemple #56
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that
        has not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (in maze distance!).

        To find the maze distance between any two positions, use:
        self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
        successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief distributions
        for each of the ghosts that are still alive.  It is defined based
        on (these are implementation details about which you need not be
        concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.

        You may remove Directions.STOP from the list of available actions.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [
            a for a in gameState.getLegalPacmanActions()
            if a != Directions.STOP
        ]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        """
        for x in range(0, 100):
            try :
                tempDist = self.distancer.getDistance(pacmanPosition, (x, 1))
            except Exception:
                print "Exception occurred at x = " + str(x)
        
        for y in range(0, 100):
            try :
                tempDist = self.distancer.getDistance(pacmanPosition, (3, y))
            except Exception:
                print "Exception occurred at y = " + str(y)
        """

        ghostLocs = util.Counter()
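        # Estimate each living ghost's position as the (rounded) center of mass
        # of its belief distribution, clamped onto a reachable board cell.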
        for agent in range(0, len(livingGhostPositionDistributions)):
            #xcomList = [(0, 10000), (10, 5), (-20, 5)]
            xcomList = [(pos[0], prob) for pos, prob in
                        livingGhostPositionDistributions[agent].iteritems()]
            ycomList = [(pos[1], prob) for pos, prob in
                        livingGhostPositionDistributions[agent].iteritems()]

            ghostLocs[agent] = self.clampOntoBoard(
                (self.centerOfMass(xcomList), self.centerOfMass(ycomList)),
                pacmanPosition)

        ghostDist = float('inf')
        closestAgent = 0
        for agent in range(0, len(livingGhostPositionDistributions)):

            tempDist = float('inf')
            try:
                tempDist = self.distancer.getDistance(pacmanPosition,
                                                      ghostLocs[agent])
            except Exception:
                print "Exception, not on board: " + str(
                    pacmanPosition) + ", " + str(ghostLocs[agent])

            if (ghostDist >= tempDist):
                ghostDist = tempDist
                closestAgent = agent

        bestDist = float('inf')
        bestAct = legal[0]
        for action in legal:
            tempDist = float('inf')
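            # (0, 0) is the sentinel clampOntoBoard returns when it fails,
            # so skip that ghost rather than computing a bogus distance.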
            if ghostLocs[closestAgent] == (0, 0):
                pass
            else:
                tempDist = self.distancer.getDistance(
                    ghostLocs[closestAgent],
                    self.castTuple(Actions.getSuccessor(
                        pacmanPosition, action)))

            if tempDist < bestDist:
                bestDist = tempDist
                bestAct = action

        return bestAct

    # The input is a list of (position, mass) tuples.
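    # Example: centerOfMass([(0, 0.25), (4, 0.75)]) returns 3 (weighted average
    # 3.0, rounded to the nearest integer grid coordinate).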
    def centerOfMass(self, pairs):

        sumOfProbs = 0
        sumOfProbsXPos = 0

        for pos, prob in pairs:
            sumOfProbs = sumOfProbs + prob
            sumOfProbsXPos = sumOfProbsXPos + (pos * prob)

        if sumOfProbs == 0:
            return 0

        return int(round(sumOfProbsXPos / sumOfProbs))

    def castTuple(self, point):
        x = point[0]
        y = point[1]

        return (int(x), int(y))

    # If the center-of-mass estimate falls off the board, this method moves it
    # onto a nearby reachable position.
    def clampOntoBoard(self, point, pacmanPos):

        # Starting at the point itself (delta = 0) and working outward, try
        # shifting the point along -x, +x, +y and -y; return the first shifted
        # position for which getDistance succeeds (i.e. a reachable board cell).
        for delta in range(0, 10):
            try:
                self.distancer.getDistance(pacmanPos,
                                           (point[0] - delta, point[1]))
                return (point[0] - delta, point[1])
            except Exception:
                pass

            try:
                self.distancer.getDistance(pacmanPos,
                                           (point[0] + delta, point[1]))
                return (point[0] + delta, point[1])
            except Exception:
                pass

            try:
                self.distancer.getDistance(pacmanPos,
                                           (point[0], point[1] + delta))
                return (point[0], point[1] + delta)
            except Exception:
                pass

            try:
                self.distancer.getDistance(pacmanPos,
                                           (point[0], point[1] - delta))
                return (point[0], point[1] - delta)
            except Exception:
                pass

        print "Unable to find a clamp for pos " + str(point)

        return (0, 0)
Exemple #57
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        # borrowed from https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
        def keywithmaxval(dict):
            v = list(dict.values())
            k = list(dict.keys())
            return k[v.index(max(v))]

        def keywithminval(dict):
            v = list(dict.values())
            k = list(dict.keys())
            return k[v.index(min(v))]
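        # e.g. keywithmaxval({'a': 1, 'b': 3}) -> 'b'; keywithminval(...) -> 'a'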

        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        most_likely_positions = {}
        for i in range(len(livingGhostPositionDistributions)):
            most_likely_positions[i] = keywithmaxval(livingGhostPositionDistributions[i])

        # Choose the ghost whose most likely position is closest in maze distance.
        ghost_distances = {i: self.distancer.getDistance(pacmanPosition, pos)
                           for i, pos in most_likely_positions.items()}
        closest_ghost = keywithminval(ghost_distances)
        closest_pos = most_likely_positions[closest_ghost]

        distances = {}

        for a in legal:
            succPos = Actions.getSuccessor(pacmanPosition, a)
            distances[a] = self.distancer.getDistance(succPos, closest_pos)

        return keywithminval(distances)
Exemple #58
0
 def registerInitialState(self, gameState):
     BustersAgent.registerInitialState(self, gameState)
     self.distancer = Distancer(gameState.data.layout, False)
     self.countActions = 0
Exemple #59
0
 def registerInitialState(self, gameState):
     """Initializes some helper modules"""
     import __main__
     self.display = __main__._display
     self.distancer = Distancer(gameState.data.layout, False)
     self.firstMove = True
Exemple #60
0
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPosDist = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"

        # For each ghost, find the position with the maximum likelihood of the
        # ghost being there, then find the distance from Pacman to that spot.
        closest_dist = float('inf')
        closest_ghost = None
        for i in range(len(livingGhostPosDist)):
            likelyPos = max(livingGhostPosDist[i],
                            key=livingGhostPosDist[i].get)
            distToPac = self.distancer.getDistance(pacmanPosition, likelyPos)
            if distToPac < closest_dist:
                closest_dist = distToPac
                closest_ghost = likelyPos

        if closest_ghost is None:
            print "No living ghost positions found; no action chosen"
            return None
        minDistMove = float('inf')
        chosenMove = None
        for act in legal:
            successorPosition = Actions.getSuccessor(pacmanPosition, act)
            distToGhost = self.distancer.getDistance(closest_ghost,
                                                     successorPosition)
            if distToGhost < minDistMove:
                minDistMove = distToGhost
                chosenMove = act

        return chosenMove