Ejemplo n.º 1
0
  def getFeatures(self, state, action):
    """Return a util.Counter of features for the (state, action) pair.

    Each feature is squashed into (0, 1] via a 1/(1+x) transform so that
    approximate Q-learning weight updates stay bounded.
    """
    # extract the grid of food and wall locations and get the ghost locations
    food = state.getFood()
    walls = state.getWalls()
    ghosts = state.getGhostPositions()

    features = util.Counter()
    features["bias"] = 1.0

    # compute the location of pacman after he takes the action
    x, y = state.getPacmanPosition()
    dx, dy = Actions.directionToVector(action)
    next_x, next_y = int(x + dx), int(y + dy)

    # inverse count of ghosts one step away: fewer ghosts -> value nearer 1
    features["#-of-ghosts-1-step-away"] = 1.0 / (
            1 + sum((next_x, next_y) in Actions.getLegalNeighbors(g, walls) for g in ghosts))

    # inverse normalized distance to the closest food (treat "no food" as 0)
    dist = closestFood((next_x, next_y), food, walls)
    if dist is None:
        dist = 0
    features["closest-food"] = 1.0 / (1 + float(dist) / (walls.width * walls.height))

    # BUG FIX: return the feature Counter itself.  The original returned
    # features.totalCount(), collapsing the features to one float and
    # breaking callers that iterate the returned feature dictionary.
    return features
Ejemplo n.º 2
0
    def getFeatures(self, state, action):
        """Classic SimpleExtractor features for a (state, action) pair.

        Returns a util.Counter with: a bias term, the number of ghosts one
        step from the successor square, whether the move safely eats food,
        and the normalized distance to the closest food.  All values are
        divided by 10 to keep learning updates small.
        """
        walls = state.getWalls()
        food = state.getFood()
        ghosts = state.getGhostPositions()

        feats = util.Counter()
        feats["bias"] = 1.0

        # Position pacman would occupy after taking `action`.
        px, py = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        nx, ny = int(px + dx), int(py + dy)

        # How many ghosts could also reach that square in one step.
        adjacent_ghosts = 0
        for ghost in ghosts:
            if (nx, ny) in Actions.getLegalNeighbors(ghost, walls):
                adjacent_ghosts += 1
        feats["#-of-ghosts-1-step-away"] = adjacent_ghosts

        # Eating is only attractive when no ghost threatens the square.
        if adjacent_ghosts == 0 and food[nx][ny]:
            feats["eats-food"] = 1.0

        dist = closestFood((nx, ny), food, walls)
        if dist is not None:
            # Keep the distance below one so the weight update cannot
            # diverge wildly.
            feats["closest-food"] = float(dist) / (walls.width * walls.height)

        feats.divideAll(10.0)
        return feats
Ejemplo n.º 3
0
    def getLegalActions( state, ghostIndex ):
        """
        Ghosts cannot stop, and cannot turn around unless they
        reach a dead end, but can turn 90 degrees at intersections.

        This variant also offers LASER and BLAST actions, which are
        stripped when the ghost lacks the power-up or is scared.
        """
        agentState = state.data.agentStates[ghostIndex]
        # Flags are kept as 0/1 ints so they can be added into the
        # dead-end threshold below.
        hasLaser = 1
        hasBlast = 1
        if(not(agentState.getLaserPower()) or agentState.scaredTimer > 0):
            hasLaser = 0 #scared ghosts shoot no laser
        if(not(agentState.getBlastPower()) or agentState.scaredTimer > 0):
            hasBlast = 0 #scared ghosts do not use blast

        conf = state.getGhostState( ghostIndex ).configuration
        possibleActions = Actions.getPossibleActions( conf, state.data.walls)
        reverse = Actions.reverseDirection( conf.direction)
        # Ghosts never stand still.
        if Directions.STOP in possibleActions:
            possibleActions.remove( Directions.STOP)
        if (not hasLaser) and (Directions.LASER in possibleActions):
            possibleActions.remove( Directions.LASER )
        if (not hasBlast) and (Directions.BLAST in possibleActions):
            possibleActions.remove( Directions.BLAST)
        # TODO:  What if we have removed Laser and Blast at this point?
        # Only allow reversing at a dead end: the threshold counts the
        # remaining LASER/BLAST entries on top of the single-exit case,
        # so reverse stays available when it is the only real move left.
        if reverse in possibleActions and len( possibleActions ) > (1+hasLaser+hasBlast): #2 instead of 1 because of LASER
            possibleActions.remove(reverse)
        return possibleActions
Ejemplo n.º 4
0
 def chooseAction(self, gameState):
     """
     Hunt the nearest ghost: take each living ghost's most likely
     position, target the one closest to pacman (maze distance), then
     pick the legal action whose successor square is nearest the target.
     """
     pacmanPosition = gameState.getPacmanPosition()
     legal = [a for a in gameState.getLegalPacmanActions()]
     livingGhosts = gameState.getLivingGhosts()
     livingGhostPositionDistributions = \
         [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
          if livingGhosts[i+1]]
     # Most likely cell for each living ghost.
     guesses = [belief.argMax() for belief in livingGhostPositionDistributions]
     # Nearest guess to pacman; min() keeps the first minimum, matching
     # a strict-< scan.
     target = min(guesses,
                  key=lambda pos: self.distancer.getDistance(pacmanPosition, pos))
     # Greedy step: action whose successor square is closest to the target.
     return min(legal,
                key=lambda a: self.distancer.getDistance(
                    Actions.getSuccessor(pacmanPosition, a), target))
Ejemplo n.º 5
0
    def chooseAction(self, gameState):
        """
        Chase the single globally most probable ghost position.

        Scans every living ghost's belief distribution for the position
        with the highest probability anywhere, then greedily picks the
        legal action whose successor square minimizes maze distance to it.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [beliefs for i,beliefs
                                            in enumerate(self.ghostBeliefs)
                                            if livingGhosts[i+1]]
        # Highest-probability cell over all living ghosts' beliefs.
        bestProb = 0.0
        target = None
        for belief in livingGhostPositionDistributions:
            for position, probability in belief.items():
                if probability > bestProb:
                    bestProb = probability
                    target = position

        # Greedy step toward that cell (first action wins ties, as min()
        # keeps the earliest minimum, matching a strict-< scan).
        return min(legal,
                   key=lambda a: self.distancer.getDistance(
                       Actions.getSuccessor(pacmanPosition, a), target))
Ejemplo n.º 6
0
  def getFeatures(self, state, action):
    """Extend SimpleExtractor's features with ghost-eating, close-ghost,
    capsule-distance and trap information.

    Cleanup vs. the original: removed the unused locals `foodpos`,
    `tot_size` and `temp` (dead computation).
    """
    walls = state.getWalls()
    capsules = state.getCapsules()
    ghosts = state.getGhostPositions()

    # Reuse the base extractor's features (bias, eats-food, closest-food...).
    # Instantiated inline to avoid a staticmethod decorator or subclassing.
    features = SimpleExtractor().getFeatures(state, action)

    # compute the location of pacman after he takes the action
    x, y = state.getPacmanPosition()
    dx, dy = Actions.directionToVector(action)
    next_x, next_y = int(x + dx), int(y + dy)

    # Minimum scared time among ghosts adjacent to the successor square.
    neighbour_scared_times = [ghost.scaredTimer for ghost in state.getGhostStates()
                              if (next_x, next_y) in Actions.getLegalNeighbors(ghost.getPosition(), walls)]
    min_scared_time = min(neighbour_scared_times) if neighbour_scared_times else 0
    if min_scared_time > 1:
        # Every adjacent ghost is still edible: chasing is safe, so the
        # danger feature is cleared.
        features["can-i-eat-ghosts"] = 1 / 10.0
        features["#-of-ghosts-1-step-away"] = 0

    # number of ghosts within distance 3 - (slow ?) A useless feature ?
    num_close_ghosts = numCloseObjects((next_x, next_y), ghosts, walls, 3)
    features["no-of-close-ghosts"] = num_close_ghosts / 10.0

    # normalized dist of closest capsule (slow)
    dist = closestObject((next_x, next_y), capsules, walls)
    if dist is not None:
        features["closest-capsule"] = float(dist) / (walls.width * walls.height)

    # Possibly trapped: every square reachable from the successor is also
    # reachable by some ghost in one step.
    reachable = set(Actions.getLegalNeighbors((next_x, next_y), walls))
    ghost_reachable = set()
    for g in ghosts:
        ghost_reachable.update(Actions.getLegalNeighbors(g, walls))
    if not reachable - ghost_reachable:
        features["trapped"] = 1 / 10.0

    return features
Ejemplo n.º 7
0
 def get_successors(self, value):
     """Expand `value`: pair each legal move with the resulting
     (position, steps-remaining) search state."""
     current = self.state(value)
     config = Config(current[0], Directions.STOP)
     walls = self.game_state.getWalls()

     successors = []
     for move in Actions.getPossibleActions(config, walls):
         landing = Actions.getSuccessor(current[0], move)
         successors.append(((landing, current[1] - 1), move))
     return successors
Ejemplo n.º 8
0
    def chooseAction(self, gameState):
        """
        Chase the nearest ghost.

        For each living ghost, take the most probable position from its
        belief distribution; target the one closest to pacman in maze
        distance, then move greedily toward it.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        # Most probable cell per belief, tracking the nearest one overall.
        closestGhost = None
        minDist = float("inf")
        for belief in livingGhostPositionDistributions:
            likelyPos = None
            likelyProb = float("-inf")
            for position in belief:
                if belief[position] > likelyProb:
                    likelyProb = belief[position]
                    likelyPos = position
            d = self.distancer.getDistance(pacmanPosition, likelyPos)
            if d < minDist:
                minDist = d
                closestGhost = likelyPos
        # Greedy step toward the chosen ghost position.
        bestAction = None
        bestDist = float("inf")
        for action in legal:
            d = self.distancer.getDistance(
                Actions.getSuccessor(pacmanPosition, action), closestGhost)
            if d < bestDist:
                bestDist = d
                bestAction = action
        return bestAction
Ejemplo n.º 9
0
  def chooseAction(self, gameState):
    """
    Hunt the ghost whose most likely position is nearest.

    For every (action, likely ghost position) pair, keep the action whose
    successor square minimizes maze distance.  STOP is excluded from the
    candidate actions.
    """
    pacmanPosition = gameState.getPacmanPosition()
    legal = [a for a in gameState.getLegalPacmanActions() if a != Directions.STOP]
    livingGhosts = gameState.getLivingGhosts()
    livingGhostPositionDistributions = [beliefs for i, beliefs
                                        in enumerate(self.ghostBeliefs)
                                        if livingGhosts[i+1]]
    # Most likely cell for each living ghost.
    guesses = [belief.argMax() for belief in livingGhostPositionDistributions]

    # Scan all (action, guess) pairs; strict < keeps the first action
    # achieving the overall minimum, exactly as a seeded scan would.
    bestAction = legal[0]
    bestDistance = float('inf')
    for candidate in legal:
      successor = Actions.getSuccessor(pacmanPosition, candidate)
      for guess in guesses:
        d = self.distancer.getDistance(successor, guess)
        if d < bestDistance:
          bestDistance = d
          bestAction = candidate
    return bestAction
Ejemplo n.º 10
0
    def getFeatures(self, state, action):
        """Feature extractor for a capture-the-flag agent.

        Bug fixes vs. the original, which crashed as written:
        `myposition` / `feature` NameError typos; `x, y` were undefined
        because the position line was commented out; `min()` raised on an
        empty capsule list; and `sum()` was applied to a bare bool when
        counting adjacent opponents.
        """
        # extract the grid of food and wall locations
        food = self.getFood(state)
        walls = state.getWalls()

        myPosition = state.getAgentState(self.index).getPosition()
        teammatePositions = [state.getAgentPosition(teammate)
                for teammate in self.getTeam(state)]

        capsulePos = self.getCapsules(state)
        isHome = state.isRed(myPosition)
        # Maze distance from the vertical midline at this agent's row.
        disFromHome = self.getMazeDistance(
            (state.data.layout.width / 2., myPosition[1]), myPosition)
        enemy = self.getOpponents(state)

        features = util.Counter()
        features["bias"] = 1.0

        # compute the location of the agent after it takes the action
        x, y = myPosition
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # normalized distances (kept below one so updates don't diverge)
        features['dis-from-home'] = float(disFromHome) / (walls.width * walls.height)
        if capsulePos:
            features['dis-from-capsules'] = float(
                min(self.getMazeDistance(myPosition, c) for c in capsulePos)
            ) / (walls.width * walls.height)

        # count the number of visible opponents one step away
        for opponent in enemy:
            pos = state.getAgentPosition(opponent)
            if pos and (next_x, next_y) in Actions.getLegalNeighbors(pos, walls):
                features["#-of-ghosts-1-step-away"] += 1

        # if there is no danger of ghosts then add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        dist = closestFood((next_x, next_y), food, walls)
        if dist is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food"] = float(dist) / (walls.width * walls.height)
        features.divideAll(10.0)
        return features
Ejemplo n.º 11
0
    def chooseAction(self, gameState):
        """
        Move toward the most likely position of the nearest living ghost.

        Each living ghost's belief distribution contributes its argMax
        position; the one nearest pacman (maze distance) becomes the
        target, and the action whose successor square is closest to that
        target is returned.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        # Best guess per ghost.
        guesses = [belief.argMax() for belief in livingGhostPositionDistributions]
        # Nearest guess (min() keeps the first minimum, matching a
        # strict-< scan).
        target = min(guesses,
                     key=lambda pos: self.distancer.getDistance(pos, pacmanPosition))
        # Action whose successor square is closest to the target.
        return min(legal,
                   key=lambda a: self.distancer.getDistance(
                       Actions.getSuccessor(pacmanPosition, a), target))
Ejemplo n.º 12
0
 def getAction(self, state):
     """Greedy food/capsule chaser.

     Takes the first legal move whose successor square holds food or a
     capsule; otherwise repeats the last choice, re-rolling a random legal
     move when the last choice is STOP or is no longer legal.

     Bug fix: the capsule test compared the move's direction *vector*
     against capsule coordinates (always false); it now checks whether
     the successor square (x, y) contains a capsule.
     """
     moves = state.getLegalPacmanActions()
     curX, curY = state.getPacmanPosition()
     food = state.getFood()
     capsules = state.getCapsules()
     for m in moves:
         dx, dy = Actions.directionToVector(m)
         x, y = curX + int(dx), curY + int(dy)
         if food[x][y] or (x, y) in capsules:
             self.lastChoice = m
             return m
     if self.lastChoice == Directions.STOP or self.lastChoice not in moves:
         self.lastChoice = random.choice(moves)
     return self.lastChoice
Ejemplo n.º 13
0
 def getLegalActions( state, ghostIndex ):
   """
   Return the moves available to a ghost.

   STOP is never allowed, and reversing direction is permitted only at a
   dead end (i.e. when it is the sole remaining option); 90-degree turns
   at intersections are fine.
   """
   config = state.getGhostState( ghostIndex ).configuration
   actions = Actions.getPossibleActions( config, state.data.layout.walls )
   backwards = Actions.reverseDirection( config.direction )
   # Ghosts never stand still.
   actions = [a for a in actions if a != Directions.STOP]
   # Only turn around when there is nowhere else to go.
   if backwards in actions and len( actions ) > 1:
     actions.remove( backwards )
   return actions
Ejemplo n.º 14
0
    def get_successors(self, value):
        """List (position, action) pairs reachable from `value`, treating
        self.blocks as additional walls on a copy of the maze grid."""
        current = self.state(value)
        config = Config(self.explore(value), Directions.STOP)
        # Copy the wall grid so the extra blocks never leak back into the
        # real game state.
        grid = deepcopy(self.game_state.getWalls())
        for bx, by in self.blocks:
            grid[bx][by] = True

        return [(Actions.getSuccessor(current, move), move)
                for move in Actions.getPossibleActions(config, grid)]
Ejemplo n.º 15
0
    def getSuccessors(self, state):
        """
        Return (successor, action, stepCost) triples for the four compass
        moves from `state`, skipping any move that runs into a wall.  The
        step cost comes from self.costFn, and each call records the
        expansion plus first-visit bookkeeping for the search display.
        """
        children = []
        x, y = state
        for direction in (Directions.NORTH, Directions.SOUTH,
                          Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(direction)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            landing = (nx, ny)
            children.append((landing, direction, self.costFn(landing)))

        # Bookkeeping for display purposes
        self._expanded += 1
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return children
Ejemplo n.º 16
0
    def getSuccessors(self, state):
        """
        Corner-search successors.

        `state` is (x, y, visited) where `visited` is a tuple of booleans,
        one per corner.  Standing on a corner marks it visited before
        expansion; each non-wall compass move then yields a
        ((nx, ny, visited), action, 1) triple.
        """
        successors = []
        x, y, visited = state

        # Mark the current corner as reached; tuples are immutable, so
        # rebuild through a list.
        if (x, y) in self.corners:
            flags = list(visited)
            flags[self.corners.index((x, y))] = True
            visited = tuple(flags)

        for action in (Directions.NORTH, Directions.SOUTH,
                       Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if not self.walls[nx][ny]:
                successors.append(((nx, ny, visited), action, 1))

        self._expanded += 1
        return successors
    def getDistribution( self, state ):
        """Directional ghost policy.

        Mostly attacks pacman (or flees when scared), mixing a uniform
        component over all legal actions into the returned distribution.
        """
        ghostState = state.getGhostState( self.index )
        legalActions = state.getLegalActions( self.index )
        pos = state.getGhostPosition( self.index )
        isScared = ghostState.scaredTimer > 0

        # Scared ghosts move at half speed.
        speed = 0.5 if isScared else 1

        vectors = [Actions.directionToVector( a, speed ) for a in legalActions]
        landingSpots = [( pos[0] + vx, pos[1] + vy ) for vx, vy in vectors]
        pacmanPosition = state.getPacmanPosition()
        distances = [manhattanDistance( spot, pacmanPosition ) for spot in landingSpots]

        # Scared: prefer the farthest squares; otherwise the closest.
        if isScared:
            bestScore = max( distances )
            bestProb = self.prob_scaredFlee
        else:
            bestScore = min( distances )
            bestProb = self.prob_attack
        bestActions = [a for a, d in zip( legalActions, distances ) if d == bestScore]

        # bestProb mass split across the best actions, remainder uniform.
        dist = util.Counter()
        for a in bestActions:
            dist[a] = bestProb / len(bestActions)
        for a in legalActions:
            dist[a] += ( 1 - bestProb ) / len(legalActions)
        dist.normalize()
        return dist
    def getSuccessors(self, state):
        """
        Corner-search successors where `state` is ((x, y), flags): the
        position plus one boolean per corner.  Each legal compass move
        produces the new position together with flags updated for any
        corner the move lands on; every step costs 1.
        """
        successors = []
        x, y = state[0]
        for action in (Directions.NORTH, Directions.SOUTH,
                       Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            # OR each existing flag with "did we just land on that corner?"
            flags = [seen or (nx, ny) == corner
                     for seen, corner in zip(state[1], self.corners)]
            successors.append((((nx, ny), flags), action, 1))

        self._expanded += 1 # DO NOT CHANGE
        return successors
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

         As noted in search.py:
            For a given state, this should return a list of triples, (successor,
            action, stepCost), where 'successor' is a successor to the current
            state, 'action' is the action required to get there, and 'stepCost'
            is the incremental cost of expanding to that successor
        """


        # NOTE(review): this mutates the incoming state's corner flags in
        # place; if cornerState instances are shared between search nodes,
        # the side effect leaks across the search tree — confirm intended.
        if state.position in self.corners:
            ind = self.corners.index(state.position)
            state.corners[ind]=True


        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
               # Square the action leads to, and whether it is blocked.
               x,y = state.position
               dx, dy = Actions.directionToVector(action)
               nextx, nexty = int(x + dx), int(y + dy)
               hitsWall = self.walls[nextx][nexty]

               # Legal moves yield a new cornerState that shares the
               # (mutated) corner flags list, at unit step cost.
               if hitsWall==False:
                   successors.append( (cornerState((nextx,nexty),state.corners),action,1) )

        self._expanded += 1 # DO NOT CHANGE
        return successors
Ejemplo n.º 20
0
    def chooseAction(self, observedState):
        """
        Flee the nearest ghost.

        Finds the ghost currently closest to pacman (maze distance) and
        takes the non-STOP legal action whose successor square is farthest
        from it.  Example strategy only — it ignores the ghosts' features
        and the capsules entirely.
        """
        pacmanPosition = observedState.getPacmanPosition()
        # states have getPosition() and getFeatures() methods
        ghost_states = observedState.getGhostStates()
        legalActs = [a for a in observedState.getLegalPacmanActions()]
        ghost_dists = np.array([self.distancer.getDistance(pacmanPosition, gs.getPosition())
                                for gs in ghost_states])
        # Index of the nearest ghost (stable sort keeps the first on ties).
        closest_idx = sorted(zip(range(len(ghost_states)), ghost_dists),
                             key=lambda t: t[1])[0][0]
        closest_pos = ghost_states[closest_idx].getPosition()
        # Maximize distance to that ghost over the non-STOP actions.
        best_action = Directions.STOP
        best_dist = -np.inf
        for la in legalActs:
            if la == Directions.STOP:
                continue
            new_dist = self.distancer.getDistance(
                Actions.getSuccessor(pacmanPosition, la), closest_pos)
            if new_dist > best_dist:
                best_action = la
                best_dist = new_dist
        return best_action
Ejemplo n.º 21
0
    def getSuccessors(self, state):
        """
        Corner-search successors where `state` is (pos, bl, tl, br, tr):
        the position plus one 0/1 flag per corner (bottom-left, top-left,
        bottom-right, top-right).  Landing on a corner sets its flag; walls
        are skipped; every step costs 1.
        """
        successors = []
        x, y = state[0]
        for action in (Directions.NORTH, Directions.SOUTH,
                       Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            nextPos = (nx, ny)
            # Set each corner flag when the move lands on that corner,
            # otherwise carry the old flag through unchanged.
            flags = [1 if nextPos == self.corners[i] else state[i + 1]
                     for i in range(4)]
            successors.append(((nextPos,) + tuple(flags), action, 1))
        self._expanded += 1
        return successors
Ejemplo n.º 22
0
def buildMazeDistanceMap(position, gameState):
    """
    Use BFS to build a map of maze distances from ``position`` to every
    reachable point in the layout.

    Args:
        position: (x, y) start cell; must not be a wall.
        gameState: game state providing the wall grid via getWalls().

    Returns:
        dict mapping each reachable (x, y) position to its maze distance
        from ``position`` (the start itself maps to 0).
    """
    x, y = position
    walls = gameState.getWalls()
    assert not walls[x][y], 'position is a wall: ' + str(position)

    # BUG FIX: the original incremented a single ``distance`` counter once per
    # popped node.  The queue holds duplicates and whole BFS levels at once,
    # so that counter counted pops, not maze distance.  The distance must be
    # derived per node as parent-distance + 1.
    distanceMap = {position: 0}
    queue = Queue()
    queue.push(position)

    while not queue.isEmpty():
        currPos = queue.pop()
        nextDistance = distanceMap[currPos] + 1
        for pos in Actions.getLegalNeighbors(currPos, walls):
            if pos not in distanceMap:
                distanceMap[pos] = nextDistance
                queue.push(pos)

    return distanceMap
Ejemplo n.º 23
0
    def getSuccessors(self, state):
        """
        Return (successor, action, stepCost) triples for *state*, where a
        state is (position, corner0Found, corner1Found, corner2Found,
        corner3Found) and every step costs 1.
        """
        x, y = state[0]
        found = [state[1], state[2], state[3], state[4]]
        successors = []
        for action in (Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            pos = (nx, ny)
            # Mark any corner we just stepped onto as found; keep the rest.
            goals = [True if pos == self.corners[i] else found[i] for i in range(4)]
            successors.append(((pos, goals[0], goals[1], goals[2], goals[3]), action, 1))
        self._expanded += 1
        return successors
Ejemplo n.º 24
0
    def getResult(self, state, action):
        """
        Return the state that results from taking *action* in *state*.

        A state is ((x, y), cornersLeft) where cornersLeft is a tuple of the
        corners not yet visited.  If the action would move into a wall, the
        original state is returned unchanged and a warning is issued.
        """
        # Note: node-expansion counting happens in getActions(), not here.
        (x, y), cornersLeft = state
        dx, dy = Actions.directionToVector(action)
        nextx, nexty = int(x + dx), int(y + dy)

        if self.walls[nextx][nexty]:
            # Illegal move: leave the state untouched.
            warnings.warn("Warning: checking the result of an invalid state, action pair.")
            return state

        landing = (nextx, nexty)
        # Drop the landing square from the remaining corners, if present.
        if landing in cornersLeft:
            cornersLeft = tuple(c for c in cornersLeft if c != landing)
        return (landing, cornersLeft)
Ejemplo n.º 25
0
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        A state is ((x, y), visitedCorners) where visitedCorners collects the
        corner positions already reached.  Each returned triple is
        (successor, action, stepCost) with stepCost == 1.
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x, y = state[0]
            visitedCorners = state[1]

            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextVisitedCorners = list(visitedCorners)
                nextState = (nextx, nexty)
                if nextState in self.corners and nextState not in nextVisitedCorners:
                    nextVisitedCorners.append(nextState)
                # FIX: store the visited-corner record as a tuple.  A raw list
                # made the whole successor state unhashable, so graph search
                # could not place it in its visited/closed set.
                successors.append(((nextState, tuple(nextVisitedCorners)), action, 1))

        self._expanded += 1
        return successors
Ejemplo n.º 26
0
    def getSuccessors(self, state):
        """
        Return (successor, action, stepCost) triples reachable from *state*.

        A state is ((x, y), cornerFlags) where cornerFlags is a 4-tuple of
        booleans marking which of self.corners have been visited.  Every
        step costs 1.
        """
        (x, y), flags = state
        successors = []
        for action in (Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            landing = (nx, ny)
            # Flip the flag for any corner we just landed on; keep the rest.
            newFlags = tuple(
                True if self.corners[i] == landing else flags[i]
                for i in range(4)
            )
            successors.append(((landing, newFlags), action, 1))

        self._expanded += 1  # DO NOT CHANGE
        return successors
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        A state is ((x, y), visitedCorners); visitedCorners lists the corner
        positions reached so far.  Each triple is (successor, action, 1).
        """
        x, y = state[0]
        visitedCorners = state[1]

        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:

            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                successorVisitedCorners = list(visitedCorners)
                nextPosition = (nextx, nexty)
                if nextPosition in self.corners and nextPosition not in successorVisitedCorners:
                    successorVisitedCorners.append(nextPosition)
                # FIX: a list inside the state made it unhashable, which breaks
                # graph search's visited-set bookkeeping; store a tuple instead.
                successor = ((nextPosition, tuple(successorVisitedCorners)), action, 1)
                successors.append(successor)

        self._expanded += 1 # DO NOT CHANGE
        return successors
Ejemplo n.º 28
0
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        A state is ((x, y), cornersToFind) where cornersToFind holds the
        corners not yet visited; each triple is (successor, action, 1).
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            hitsWall = self.walls[nextx][nexty]
            cornersToFind = state[1]
            if not hitsWall:
                newLocation = (nextx, nexty)
                # FIX: remove the corner the successor LANDS ON, not the square
                # being left (state[0]).  Filtering by the current position
                # deferred every corner's removal by one step, so the goal test
                # fired one move too late.
                cornersToFind = tuple(corner for corner in cornersToFind if corner != newLocation)
                newState = ((newLocation, cornersToFind), action, 1)
                successors.append(newState)
        self._expanded += 1
        return successors
Ejemplo n.º 29
0
def followActionBasis(self, observedState, action):
    """
    Pick the legal direction (STOP excluded) whose successor position is
    closest, by the agent's distancer metric, to the target selected by
    *action* ('good', 'bad', or 'capsule').
    """
    pacmanPosition = observedState.getPacmanPosition()

    if action == 'good':
        target = classifyGhosts.getNearestGoodGhost(observedState, self.distancer)
    elif action == 'bad':
        target = classifyGhosts.getNearestBadGhost(observedState, self.distancer)
    elif action == 'capsule':
        target = classifyCapsule.closest(observedState, self.distancer)
    else:
        raise Exception("Unknown action '%s' passed to action basis" % action)

    # Greedy choice: evaluate each candidate move and keep the best so far.
    chosen, chosenDist = Directions.STOP, np.inf
    for candidate in observedState.getLegalPacmanActions():
        if candidate == Directions.STOP:
            continue
        stepDist = self.distancer.getDistance(Actions.getSuccessor(pacmanPosition, candidate), target)
        if stepDist < chosenDist:
            chosen, chosenDist = candidate, stepDist
    return chosen
Ejemplo n.º 30
0
    def getSuccessors(self, state):
        """
        Return (successor, action, stepCost) triples for *state*, where a
        state is ((x, y), remainingCorners) and every step costs 1.  Landing
        on a remaining corner removes it from the successor's corner tuple.
        """
        (x, y), remaining = state
        successors = []
        for action in (Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST):
            dx, dy = Actions.directionToVector(action)
            nx, ny = int(x + dx), int(y + dy)
            if self.walls[nx][ny]:
                continue
            landing = (nx, ny)
            if landing in remaining:
                # Reached a new corner: drop it from the remaining tuple.
                stillLeft = tuple(c for c in remaining if c != landing)
            else:
                stillLeft = remaining
            successors.append(((landing, stillLeft), action, 1))

        self._expanded += 1 # DO NOT CHANGE
        return successors
Ejemplo n.º 31
0
 def getLegalActions(state):
     """Return the list of actions Pacman may legally take in *state*."""
     config = state.getPacmanState().configuration
     walls = state.data.layout.walls
     return Actions.getPossibleActions(config, walls)
Ejemplo n.º 32
0
 def getLegalActions(state, ghostIndex):
     """Return the list of legal actions for the ghost at *ghostIndex*."""
     ghostConfig = state.getGhostState(ghostIndex).configuration
     layoutWalls = state.data.layout.walls
     return Actions.getPossibleActions(ghostConfig, layoutWalls)
    def getFeatures(self, state, action):
        """
        Feature extractor for a capture-the-flag agent.

        Computes indicator/distance features for taking *action* in *state*:
        ghost proximity (dangerous vs. scared), invader proximity, capsule and
        food consumption, and the distance to the closest food in this agent's
        assigned zone.  All features are scaled down by 10.
        """
        food = self.getFood(state)
        foodList = food.asList()
        walls = state.getWalls()
        isPacman = self.getSuccessor(state, action).getAgentState(self.index).isPacman

        # Zone of the board agent is primarily responsible for
        zone = (self.index - self.index % 2) / 2

        opponents = [state.getAgentState(i) for i in self.enemies]
        chasers = [a for a in opponents if not a.isPacman]
        prey = [a for a in opponents if a.isPacman]

        features = util.Counter()
        if action == Directions.STOP:
            features["stopped"] = 1.0
        # compute the location of pacman after he takes the action
        x, y = state.getAgentState(self.index).getPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # count the number of ghosts 1-step away
        for g in chasers:
            # BUG FIX: getPosition is a method; the original compared the tuple
            # against the bound method object, which is always False.
            if (next_x, next_y) == g.getPosition():
                if g.scaredTimer > 0:
                    features["eats-ghost"] += 1
                    features["eats-food"] += 2
                else:
                    features["#-of-dangerous-ghosts-1-step-away"] = 1
                    features["#-of-harmless-ghosts-1-step-away"] = 0
            elif (next_x, next_y) in Actions.getLegalNeighbors(g.getPosition(), walls):
                if g.scaredTimer > 0:
                    features["#-of-harmless-ghosts-1-step-away"] += 1
                elif isPacman:
                    features["#-of-dangerous-ghosts-1-step-away"] += 1
                    features["#-of-harmless-ghosts-1-step-away"] = 0
        if state.getAgentState(self.index).scaredTimer == 0:
            for g in prey:
                if (next_x, next_y) == g.getPosition():  # BUG FIX: call getPosition
                    features["eats-invader"] = 1
                elif (next_x, next_y) in Actions.getLegalNeighbors(g.getPosition(), walls):
                    features["invaders-1-step-away"] += 1
        else:
            # While we are scared, being on or near any visible opponent is penalized.
            for g in opponents:
                if g.getPosition() != None:
                    if (next_x, next_y) == g.getPosition():  # BUG FIX: call getPosition
                        features["eats-invader"] = -10
                    elif (next_x, next_y) in Actions.getLegalNeighbors(g.getPosition(), walls):
                        features["invaders-1-step-away"] += -10

        for capsule_x, capsule_y in state.getCapsules():
            if next_x == capsule_x and next_y == capsule_y and isPacman:
                features["eats-capsules"] = 1.0
        if not features["#-of-dangerous-ghosts-1-step-away"]:
            if food[next_x][next_y]:
                features["eats-food"] = 1.0
            if len(foodList) > 0:  # This should always be True,  but better safe than sorry
                # BUG FIX: the loop variable used to shadow the `food` grid;
                # renamed to foodPos.
                myFood = []
                for foodPos in foodList:
                    food_x, food_y = foodPos
                    if (food_y > zone * walls.height / 3 and food_y < (zone + 1) * walls.height / 3):
                        myFood.append(foodPos)
                if len(myFood) == 0:
                    myFood = foodList
                myMinDist = min([self.getMazeDistance((next_x, next_y), foodPos) for foodPos in myFood])
                if myMinDist is not None:
                    features["closest-food"] = float(myMinDist) / (walls.width * walls.height)

        features.divideAll(10.0)

        return features
Ejemplo n.º 34
0
    def getHomeboundFeatures(self, state, action):
        """
        Features for an agent returning home.

        Marks whether *action* is the best (shortest maze distance) step
        toward self.home, and adds danger features: dying to a ghost, ghosts
        one step away, opponents within four steps, and distance to the
        closest ghost.  All features are scaled down by 10.
        """
        features = util.Counter()
        x, y = pos = state.getAgentPosition(self.index)
        # FIX: removed a dead `successor = state.generateSuccessor(...)` here;
        # the name was immediately reassigned inside the loop below.
        actions = state.getLegalActions(self.index)
        # Meta data
        walls = state.getWalls()
        opponents = self.getLikelyOppPosition()
        ghosts = []
        oppPacmen = []

        # Fill out opponent arrays: an opponent on our side of the border is
        # an invading pacman; otherwise it is a ghost unless it is scared.
        if self.isOnRedTeam:
            oppindices = state.getBlueTeamIndices()
            for i, opp in enumerate(opponents):
                if opp[0] < self.border:
                    oppPacmen.append(opp)
                else:
                    if state.getAgentState(oppindices[i]).scaredTimer == 0:
                        ghosts.append(opp)
        else:
            oppindices = state.getRedTeamIndices()
            for i, opp in enumerate(opponents):
                if opp[0] >= self.border:
                    oppPacmen.append(opp)
                else:
                    if state.getAgentState(oppindices[i]).scaredTimer == 0:
                        ghosts.append(opp)

        # Find the legal action that gets closest to home.
        best_action = None  # FIX: guard against referencing it unbound below
        best_dist = 9999
        for spec_action in actions:
            successor = state.generateSuccessor(self.index, spec_action)
            pos2 = successor.getAgentPosition(self.index)
            dist = self.getMazeDistance(self.home, pos2)
            if dist < best_dist:
                best_action = spec_action
                best_dist = dist

        if best_action == action:
            features['best-action'] = 1

        next_x, next_y = self.generateSuccessorPosition(pos, action)

        # count the number of ghosts 1-step away
        ghostsOneStepAway = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls)
            for g in ghosts)

        # count the number of opponents that are 4 steps or fewer away
        oppFourStepsAway = sum(
            1 for ghost in ghosts
            if self.getMazeDistance((next_x, next_y), ghost) <= 4)

        # Only one feature if a ghost killed us
        if (next_x, next_y) in ghosts:
            features['died'] = 1.0
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)
        # Only one feature if we're about to die
        elif ghostsOneStepAway >= 1:
            features['ghosts-1-step-away'] = float(ghostsOneStepAway) / len(
                ghosts)
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)
        # Only one feature if there are opponents fewer than 4 steps away
        elif oppFourStepsAway >= 1:
            features['opponents-4-steps-away'] = float(oppFourStepsAway) / len(
                ghosts)
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)

        if len(ghosts) >= 1:
            dists = [self.dist((next_x, next_y), pac, walls) for pac in ghosts]
            features['closest-ghost'] = float(
                min(dists)) / (walls.width * walls.height)

        if action == Directions.STOP: features['stop'] = 1

        features.divideAll(10.0)
        return features
Ejemplo n.º 35
0
    def chooseAction(self, gameState):
        """
        Compute the most likely position of every living ghost, then pick
        the legal action (STOP excluded) whose successor position minimizes
        the maze distance to the nearest of those positions.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [
            a for a in gameState.getLegalPacmanActions()
            if a != Directions.STOP
        ]

        # Most probable position per living ghost.  Beliefs index from 0
        # while agents index from 1 (agent 0 is Pacman).
        livingFlags = gameState.getLivingGhosts()
        likelyPositions = [None] * len(livingFlags)
        for agentId, alive in enumerate(livingFlags):
            if not alive:
                continue
            bestProb, bestPos = 0, None
            for cell, prob in self.ghostBeliefs[agentId - 1].items():
                if prob > bestProb:
                    bestProb, bestPos = prob, cell
            likelyPositions[agentId] = bestPos

        # Greedy action choice: minimize distance to the nearest likely ghost.
        optAction, optDistance = None, 99999
        for candidate in legal:
            nextPos = Actions.getSuccessor(pacmanPosition, candidate)
            nearest = 99999
            for ghostPos in likelyPositions:
                if ghostPos is None:
                    continue
                d = self.distancer.getDistance(nextPos, ghostPos)
                if d < nearest:
                    nearest = d
            if nearest < optDistance:
                optDistance = nearest
                optAction = candidate

        return optAction
Ejemplo n.º 36
0
    def getFeatures(self, state, action):
        """
        Q-learning features: bias, ghosts one step away, food consumption,
        closest-food distance, plus capsule/scared-ghost adjustments.  All
        features are scaled down by 10 to keep weight updates stable.
        """
        food = state.getFood()
        walls = state.getWalls()
        ghosts = state.getGhostPositions()
        capsulesLeft = len(state.getCapsules())

        # Partition ghost states by whether they are currently scared.
        scaredGhost = [g for g in state.getGhostStates() if g.scaredTimer]
        activeGhost = [g for g in state.getGhostStates() if not g.scaredTimer]

        pos = state.getPacmanPosition()

        def manhattanTo(ghostStates):
            # Manhattan distance from Pacman's current position to each ghost.
            return [util.manhattanDistance(pos, g.getPosition()) for g in ghostStates]

        features = util.Counter()
        features["bias"] = 1.0

        # Pacman's position after taking the action.
        x, y = pos
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # How many ghosts could also reach that square next step?
        features["#-of-ghosts-1-step-away"] = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls)
            for g in ghosts)

        # Eating food is only attractive when no ghost is adjacent.
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        dist = closestFood((next_x, next_y), food, walls)
        if dist is not None:
            # Keep the distance below one so the weight update does not diverge.
            features["closest-food"] = float(dist) / (walls.width * walls.height)

        if scaredGhost:
            closestScared = min(manhattanTo(scaredGhost))
            if activeGhost:
                closestActive = min(manhattanTo(activeGhost))
            else:
                closestActive = 10
            features["capsules"] = capsulesLeft
            # Chase scared ghosts: while one is close and no active ghost is
            # right on us, suppress the danger/food features.
            if closestScared <= 8 and closestActive >= 2:
                features["#-of-ghosts-1-step-away"] = 0
                features["eats-food"] = 0.0

        features.divideAll(10.0)
        return features
Ejemplo n.º 37
0
    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        Uses self.distancer.getDistance(pos1, pos2) for maze distances and
        Actions.getSuccessor(position, action) for successor positions.
        self.ghostBeliefs[i] is the belief distribution for ghost agent i+1;
        gameState.getLivingGhosts() flags which agents are still alive
        (agent 0 is Pacman, so ghosts are agents 1 onwards).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        import random

        # Most likely position for each living ghost.
        livingGhostsPos = []
        for belief in livingGhostPositionDistributions:
            bestProb, bestPos = -1000000, None
            for position in belief:
                if belief[position] > bestProb:
                    bestProb, bestPos = belief[position], position
            if bestProb >= 0:
                livingGhostsPos.append(bestPos)

        # Target the ghost closest to Pacman's current position.
        # BUG FIX: the original never updated the running minimum while
        # scanning, so it targeted the LAST ghost scanned, not the closest.
        closestGhostPos = None
        closestGhostDist = 1000000
        for ghostPos in livingGhostsPos:
            d = self.distancer.getDistance(pacmanPosition, ghostPos)
            if d < closestGhostDist:
                closestGhostDist = d
                closestGhostPos = ghostPos

        # Score every legal action by its successor's distance to the target,
        # then choose uniformly among the best-scoring actions.
        scored = [(a, self.distancer.getDistance(
            Actions.getSuccessor(pacmanPosition, a), closestGhostPos))
            for a in legal]
        bestDist = min(d for _, d in scored)
        bestActions = [a for a, d in scored if d == bestDist]
        return random.choice(bestActions)
Ejemplo n.º 38
0
    def isDeadRoad(self, gameState, position1, position2):
        """
        Decide whether moving from position1 into position2 leads into a dead
        road: with position1 walled off, a search from position2 cannot reach
        a home square (self.isHome).

        Results are memoized in the global deadRoadRecord keyed by
        (position1, cell); dead cells are also mirrored into the global
        deadRoad map.
        """
        global deadRoadRecord
        # Memoized answer from a previous query?
        if (position1, position2) in deadRoadRecord:
            return deadRoadRecord[(position1, position2)]

        x2, y2 = position1

        from util import PriorityQueue
        openList = PriorityQueue()
        closeList = []  # positions already expanded by this search
        path = []  # action sequence rebuilt on success (not used further here)

        # Block the square we came from so the search must find another way out.
        walls = gameState.getWalls().deepCopy()

        walls[int(x2)][int(y2)] = True

        def findPath(node):
            # Rebuild the action list by recursively following parent links.
            if node[1]:  # If it is not a start state
                findPath(node[1])
                path.append(node[2])

        def heruistic1(positionx):
            # Search heuristic: horizontal distance to the column self.safeX.
            x, y = positionx
            return abs(x - self.safeX)

        # Node layout: (position, parentNode, actionFromParent, gCost);
        # priority is gCost + heuristic (A*-style).
        startPosition = position2
        startNode = (startPosition, [], [], 0)
        openList.push(startNode, heruistic1(startPosition))

        while not openList.isEmpty():
            currentNode = openList.pop()
            currentPosition = currentNode[0]
            if currentPosition not in closeList:
                if self.isHome(currentPosition):
                    # Escaped: every expanded cell is provably not a dead road.
                    findPath(currentNode)
                    for p in closeList:
                        deadRoadRecord[(position1, p)] = False
                    deadRoadRecord[(position1, position2)] = False
                    return False
                closeList.append(currentPosition)
                for position in Actions.getLegalNeighbors(currentPosition, walls):
                    lastNode = currentNode[1]
                    if lastNode:
                        # Skip self-moves and immediate backtracking to the parent.
                        if not position == currentPosition and not position == lastNode[0]:
                            if (position1, position) in deadRoadRecord:
                                # A cached verdict for this cell settles the whole query.
                                for p in closeList:
                                    deadRoadRecord[(position1, p)] = deadRoadRecord[(position1, position)]
                                return deadRoadRecord[(position1, position)]
                            action = Actions.vectorToDirection(
                                (position[0] - currentPosition[0], position[1] - currentPosition[1]))
                            openList.push((position, currentNode, action, currentNode[3] + 1),
                                          currentNode[3] + 1 + heruistic1(position))
                    else:
                        # Start node has no parent: only skip self-moves.
                        if not position == currentPosition:
                            if (position1, position) in deadRoadRecord:
                                for p in closeList:
                                    deadRoadRecord[(position1, p)] = deadRoadRecord[(position1, position)]
                                return deadRoadRecord[(position1, position)]
                            action = Actions.vectorToDirection(
                                (position[0] - currentPosition[0], position[1] - currentPosition[1]))
                            openList.push((position, currentNode, action, currentNode[3] + 1),
                                          currentNode[3] + 1 + heruistic1(position))

        # Search exhausted without reaching home: it's a dead road.  The
        # mirrored coordinates are recorded too — presumably exploiting the
        # layout's point symmetry; NOTE(review): confirm the map is symmetric.
        global deadRoad
        reverseposition = (self.mapWidth - 1 - position1[0], self.mapHeight - 1 - position1[1])
        if reverseposition not in deadRoad:
            deadRoad[reverseposition] = []

        for p in closeList:
            deadRoadRecord[(position1, p)] = True
            deadRoad[reverseposition].append((self.mapWidth - 1 - p[0], self.mapHeight - 1 - p[1]))
        return True
    def getFeatures(self, state, action):
        """Feature extractor for approximate Q-learning Pacman.

        Returns a util.Counter of features (all scaled down by 10 at the
        end to keep weight updates stable) for taking `action` in `state`.
        Scared ghosts are ignored for the danger-related features.
        """
        # extract the grid of food and wall locations and get the ghost locations
        food = state.getFood()
        walls = state.getWalls()
        ghosts = state.getGhostPositions()
        capsules = state.getCapsules()
        # Materialize the int-coordinate capsule list: a bare map() is a
        # one-shot iterator on Python 3 and would be exhausted by the first
        # of the repeated membership tests below.
        capsules = list(map((lambda coord: (int(coord[0]), int(coord[1]))), capsules))

        features = util.Counter()

        features["bias"] = 1.0

        # Remember how much food the level started with; the ghost-avoidance
        # area below shrinks as food is eaten.
        if self.initial_food == 0:
            self.initial_food = food.count()
        ifood = self.initial_food

        # NOTE: the paper suggests a feature indicating whether the previous
        # direction is kept.  It was attempted, but the agent sometimes
        # yields a "None" action, so it stays disabled.

        # compute the location of pacman after he takes the action
        x, y = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)
        past_coord = coordVectSub((x, y), (dx, dy))
        ns_ghosts = [g for g in ghosts if not isScared(state, g)]

        # count the number of non-scared ghosts 1-step away
        ghosts_besides = sum((next_x, next_y) in Actions.getLegalNeighbors(g, walls) for g in ghosts if not isScared(state, g))
        features["#-of-ghosts-1-step-away"] = ghosts_besides

        # calculate distances
        # NOTE: getCoordsArea limits the area around a ghost that pacman
        # keeps away from; it shrinks quadratically with remaining food.
        area = getCoordsArea(ns_ghosts, int(round(2*pow(food.count()/float(ifood),2))))
        food_dist = closestFood((next_x, next_y), food, ns_ghosts, walls, area, capsules)
        ns_ghosts_dist = distanceToCoords((next_x, next_y), ns_ghosts, walls)

        # Reward eating food (or a capsule) when it is closer than every
        # non-scared ghost.
        # BUG FIX: the condition used to be "(...)/2"; under Python 2's
        # integer division True/2 == 0, so the feature could never fire.
        # The food_dist guard also avoids comparing None on Python 3.
        is_food_next = food[next_x][next_y] or (next_x, next_y) in capsules
        if is_food_next and (len(ns_ghosts_dist) == 0 or
                             (food_dist is not None and food_dist < min(ns_ghosts_dist))):
            features["eats-food"] = 1.0

        # Distance to each non-scared ghost, normalized by board area.
        # Assumes distanceToCoords returns one distance per ghost in
        # ns_ghosts order -- TODO confirm against its definition.
        gi = 0
        for g in ghosts:
            if not isScared(state, g):
                features["ghost-"+str(gi+1)+"-distance"] = (float(ns_ghosts_dist[gi]) / (walls.width * walls.height))
                gi += 1

        # Distance to closest food
        if food_dist is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food"] = float(food_dist) / (walls.width * walls.height)

        features.divideAll(10.0)

        self.past_action = action
        return features
Ejemplo n.º 40
0
    def getFeatures(self, state, action, agent):
        """Feature extractor for a capture-the-flag agent (red team).

        Returns a util.Counter of features for `agent` taking `action` in
        `state`.  Enemies are the blue team; "home" is the left half of the
        board (x < walls.width / 2).
        """
        features = util.Counter()

        ###### Feature 1: bias
        features["bias"] = 1.0
        ###################################################

        ###### Feature 2: closest-food
        food = state.getBlueFood()
        walls = state.getWalls()
        x, y = state.getAgentPosition(agent)
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        dist = closestFood((next_x, next_y), food, walls)
        if dist is not None:
            features["closest-food"] = 10 * float(dist) / (walls.width *
                                                           walls.height)
        else:
            # No reachable food left: the only useful goal is to head home.
            features['returns-food-home'] = 1.0

        if food[next_x][next_y]:
            features['eats-food'] = 1.0
        ###################################################

        ###### Feature 3: food-carrying
        next_state = state.generateSuccessor(agent, action)
        features["carrying-food"] = next_state.getAgentState(
            agent).numCarrying / 20
        ###################################################

        def _home_distance(pos):
            # Maze distance from pos to the nearest open border cell on our
            # side (both border columns must be free so the crossing is
            # actually possible).  100 acts as an "unreachable" sentinel.
            # This scan was previously copy-pasted in three places.
            best = 100
            for j in range(0, int(walls.height) - 1):
                if walls.data[int(walls.width / 2)][j] or \
                        walls.data[int(walls.width / 2) - 1][j]:
                    continue
                d = distance((walls.width / 2 - 1, j), pos, walls)
                if d < best:
                    best = d
            return best

        ###### Feature 5: invader-distances
        ###### Feature 6: non-invader-distances
        agent_state = next_state.getAgentState(agent)
        for enemy_index in state.getBlueTeamIndices():
            enemy_state = next_state.getAgentState(enemy_index)
            enemy_pos = next_state.getAgentPosition(enemy_index)
            enemy_md = distance((next_x, next_y), enemy_pos, walls)
            if enemy_state.isPacman and not agent_state.isPacman:
                # Enemy invades while we defend: track invader distance.
                features['invader-distances'] += 1.0 / len(state.getBlueTeamIndices()) * \
                    (10 * float(enemy_md) / (walls.width * walls.height))
            elif not enemy_state.isPacman and agent_state.isPacman:
                # We invade while the enemy defends: track defender distance.
                features['non-invader-distances'] += 1.0 / len(state.getBlueTeamIndices()) * \
                    (10 * float(enemy_md) / (walls.width * walls.height))
        ###################################################

        ###### Feature 9: returns-food-home
        # Crossing the border back to our side banks the carried food.
        if (x == walls.width / 2) and (next_x == walls.width / 2 - 1):
            features['returns-food-home'] = next_state.getAgentState(
                agent).numCarrying / 20
        ###################################################

        ##### Feature 11: enemy-eaten
        for enemy_index in state.getBlueTeamIndices():
            enemy_pos = state.getAgentPosition(enemy_index)
            next_enemy_pos = next_state.getAgentPosition(enemy_index)
            t = (abs(next_enemy_pos[0] - enemy_pos[0]),
                 abs(next_enemy_pos[1] - enemy_pos[1]))
            # An enemy that moved farther than one step must have been eaten
            # and respawned.  (Does not distinguish our ghost eating their
            # pacman from our pacman eating a scared ghost.)
            if not (t == (0, 1) or t == (1, 0) or t == (0, 0)):
                features['enemy-eaten'] = 1.0
        ###################################################

        agent_state = next_state.getAgentState(agent)
        agent_pos = next_state.getAgentPosition(agent)
        for enemy_index in state.getBlueTeamIndices():
            enemy_pos = next_state.getAgentPosition(enemy_index)
            enemy_state = next_state.getAgentState(enemy_index)
            if agent_state.isPacman and not enemy_state.isPacman and enemy_state.scaredTimer == 0:
                # We are invading and a live defender is nearby.
                dist = distance(enemy_pos, agent_pos, walls)
                if dist >= 7:
                    features['non-invader-distances'] = 0.0
                else:
                    # Defender within 6 steps: stop chasing food, run home.
                    # (The original's "<=6 and >3" and "<=3" branches were
                    # byte-identical; maze distances are integral, so the
                    # else-branch is exactly dist <= 6.)
                    features['closest-food'] = 0.0
                    features['home-distance'] = 10 * float(
                        _home_distance(agent_pos)) / (walls.width * walls.height)
            elif not agent_state.isPacman and enemy_state.isPacman and agent_state.scaredTimer == 0:
                # We defend against an invader: drop the food objective.
                # (The original's <30 / <20 / <8 / <=4 branches all assigned
                # the same value, so only the outermost test matters.)
                dist = distance(enemy_pos, agent_pos, walls)
                if dist < 30:
                    features['closest-food'] = 0.0
            # RETURNING HOME WITHOUT REWARD
            elif not agent_state.isPacman and not enemy_state.isPacman:
                # Both on their own side; if the enemy is right on us,
                # prefer the safe border crossing over chasing food.
                dist = distance(enemy_pos, agent_pos, walls)
                if dist <= 3:
                    if (x == walls.width / 2) and (next_x
                                                   == walls.width / 2 - 1):
                        features['returns-food-home'] = 1.0
                    else:
                        features['closest-food'] = 0.0
            elif not agent_state.isPacman and enemy_state.isPacman and agent_state.scaredTimer != 0:
                # Scared defender: do not chase the invader.
                features['invader-distances'] = 0.0

        # Carrying a lot of food and not already heading home: cash in.
        if agent_state.numCarrying >= 6 and features['returns-food-home'] < 0.8:
            features['closest-food'] = 0.0
            features['home-distance'] = 10 * float(
                _home_distance(agent_pos)) / (walls.width * walls.height)

        ###################################################
        return features
Ejemplo n.º 41
0
def aStarSearchGhost(problem, gameState, ghostIndex, heuristic=nullHeuristic):
    """
    Search the node that has the lowest combined cost and heuristic first, as a ghost.

    Penalizes an immediate reversal of the ghost's current direction, and,
    while the ghost is scared, penalizes moves that close in on Pacman.
    Returns the list of actions to the goal, or None if the open list is
    exhausted without reaching it.
    Inspired by BFS/A* lecture notes pseudo-code.
    """
    open_node_list = PriorityQueue()
    closed_node_list = {}
    # Maps state -> [prior_state, action, g-value, f-value]; the start
    # state maps to [] so path reconstruction knows where to stop.
    prior_future_map = {}
    ghost_path = []
    path_cost = 0
    heuristic_cost = 0
    reverse_direction = Actions.reverseDirection(gameState.getGhostState(ghostIndex).configuration.direction)

    start_state = problem.getStartState()
    prior_future_map[start_state] = []
    open_node_list.push(start_state, heuristic_cost)

    def traverse_path(prior_node):
        # Walk the parent links back to the start, collecting actions in
        # reverse order (the caller reverses them).
        while True:
            map_row = prior_future_map[prior_node]
            if len(map_row) == 4:  # map_row = [prior_node, direction, gvalue, fvalue]
                prior_node = map_row[0]
                ghost_path.append(map_row[1])
            else:
                break
        return ghost_path

    while not open_node_list.isEmpty():

        prior_node = open_node_list.pop()

        if prior_node != problem.getStartState():
            path_cost = prior_future_map[prior_node][2]

        if problem.isGoalState(prior_node):
            path_list = traverse_path(prior_node)
            path_list.reverse()
            return path_list

        # "x not in d" replaces the Python-2-only d.has_key(x) == False
        elif prior_node not in closed_node_list:
            closed_node_list[prior_node] = []
            for future_nodes in problem.getSuccessors(prior_node):
                future_state = future_nodes[0]
                future_action = future_nodes[1]

                ######################################################################
                # CPSC 481 - make cost of illegal move very high so it's never chosen
                ######################################################################
                # instead of prior_node == start_state, we could do a check if location
                # of prior state is within a given range of current state
                if future_action == reverse_direction and prior_node == start_state:
                    # this could return an invalid direction after moving .5 while scared?
                    future_cost = 99999999999999
                else:
                    future_cost = future_nodes[2]

                ################################################################################
                # CPSC 481 - make ghosts attempt to avoid PacMan if they're able to when scared
                ################################################################################
                if gameState.getGhostState(ghostIndex).scaredTimer > 0:
                    pacman_position = gameState.getPacmanPosition()
                    distance_to_pacman = manhattanDistance(gameState.getGhostPosition(ghostIndex), pacman_position)
                    ghost_position_next = Actions.getSuccessor(gameState.getGhostPosition(ghostIndex), future_action)
                    pacman_position_next = Actions.getSuccessor(pacman_position, Directions.STOP)
                    distance_next = manhattanDistance(ghost_position_next, pacman_position_next)
                    if distance_next < distance_to_pacman:
                        future_cost += 99999
                    elif distance_next == distance_to_pacman:
                        future_cost += 99
                ################################################################################

                heuristic_cost = heuristic(future_state, problem)
                gvalue = path_cost + future_cost
                fvalue = gvalue + heuristic_cost

                if future_state not in closed_node_list:
                    open_node_list.push(future_state, fvalue)
                if future_state not in prior_future_map:
                    prior_future_map[future_state] = [prior_node, future_action, gvalue, fvalue]
                elif future_state != start_state:
                    # Keep whichever path to this state has the lower f-value.
                    if prior_future_map[future_state][3] > fvalue:
                        prior_future_map[future_state] = [prior_node, future_action, gvalue, fvalue]
Ejemplo n.º 42
0
 def getDirectionalVector(self, action):
     """Return the (dx, dy) movement vector corresponding to *action*."""
     movement_vector = Actions.directionToVector(action)
     return movement_vector
Ejemplo n.º 43
0
    def escape(self, gameState):
        """Plan a route back to our own side with A*, treating squares on
        and around enemy ghosts as walls.  Returns the first action of the
        safe path; if no safe path exists, falls back to the legal action
        that maximizes distance to the nearest ghost.  Also records the
        path length in self.safeDistance (0 when no path was found)."""
        from util import PriorityQueue
        openList = PriorityQueue()
        closeList = []
        path = []

        # Work on a copy so the real game walls are untouched.
        walls = gameState.getWalls().deepCopy()

        # Block each ghost's cell and its neighbours so the search routes
        # around them; the left/right neighbour stays open when it lies on
        # the border column (safeX), so crossing home remains possible.
        for a in self.ghosts:
            x, y = a.getPosition()

            walls[int(x)][int(y)] = True
            if not x + 1 == self.safeX:
                walls[int(x + 1)][int(y)] = True

            walls[int(x)][int(y + 1)] = True
            if not x - 1 == self.safeX:
                walls[int(x - 1)][int(y)] = True

            walls[int(x)][int(y - 1)] = True


        # While scared we cannot fight invaders either, so avoid them too.
        if self.myState.scaredTimer > 0:
            for a in self.invaders:
                x, y = a.getPosition()

                walls[int(x)][int(y)] = True
                walls[int(x + 1)][int(y)] = True
                walls[int(x)][int(y + 1)] = True
                walls[int(x - 1)][int(y)] = True
                walls[int(x)][int(y - 1)] = True

        # Do not cross the border right next to an invader camping on it.
        for a in self.invaders:
            x, y = a.getPosition()
            if x == self.safeX:
                if self.red:
                    walls[int(x + 1)][int(y)] = True
                else:
                    walls[int(x - 1)][int(y)] = True

        def findPath(node):
            # Recursively rebuild the action list by walking parent links.
            if node[1]:  # If it is not a start state
                findPath(node[1])
                path.append(node[2])

        def heruistic2(position1):
            # Heuristic: horizontal distance to the home border column.
            x, y = position1
            return abs(x - self.safeX)

        myState = gameState.getAgentState(self.index)
        startPosition = myState.getPosition()
        # Node layout: (position, parent node, action from parent, g-cost).
        startNode = (startPosition, [], [], 0)
        openList.push(startNode, heruistic2(startPosition))

        while not openList.isEmpty():
            currentNode = openList.pop()
            currentPosition = currentNode[0]
            if currentPosition not in closeList:
                if self.isHome(currentPosition):
                    findPath(currentNode)
                    self.safeDistance = len(path)
                    return path[0]
                closeList.append(currentPosition)
                for position in Actions.getLegalNeighbors(currentPosition, walls):
                    action = Actions.vectorToDirection(
                        (position[0] - currentPosition[0], position[1] - currentPosition[1]))
                    openList.push((position, currentNode, action, currentNode[3] + 1),
                                  currentNode[3] + 1 + heruistic2(position))

        self.safeDistance = 0

        # No safe path home: pick the action that maximizes the distance to
        # the nearest ghost, breaking ties at random.
        if gameState.getLegalActions(self.index):
            legalActions = gameState.getLegalActions(self.index)
            distance = []
            for action in legalActions:
                successor = self.getSuccessor(gameState, action)
                myState = successor.getAgentState(self.index)
                distance.append(self.getGhostDistance(myState))

            maxD = max(distance)
            bestActions = [a for a, v in zip(legalActions, distance) if v == maxD]
            return random.choice(bestActions)
        else:
            return Directions.STOP
Ejemplo n.º 44
0
    def getFeatures(self, state, action):
        """Feature extractor driven by a single BFS from Pacman's successor
        position.

        Records the distances to the closest food, capsule, normal ghost
        and scared ghost, counts ghosts one and two steps away, and flags
        eating opportunities.  All features are scaled down by 10 to keep
        approximate Q-learning updates stable.
        """
        # extract the grid of food and wall locations and get the ghost locations
        food = state.getFood()
        walls = state.getWalls()
        capsules = state.getCapsules()
        ghost_states = state.getGhostStates()

        # Split ghosts: a ghost only counts as "scared" while enough of its
        # timer remains (> 2 ticks) to be worth chasing.
        scared_ghosts = list()
        normal_ghosts = list()

        for g in ghost_states:
            if g.scaredTimer and (g.scaredTimer > 2):
                scared_ghosts.append(g)
            else:
                normal_ghosts.append(g)

        # BUG FIX: materialize as lists.  A bare map() is a one-shot
        # iterator on Python 3 and would be exhausted by the first of the
        # repeated membership tests inside the BFS loop below.
        scared_ghosts_positions = [g.getPosition() for g in scared_ghosts]
        normal_ghosts_positions = [g.getPosition() for g in normal_ghosts]

        features = util.Counter()

        features["bias"] = 1.0

        # compute the location of pacman after he takes the action
        x, y = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        closest_food = None
        closest_scared_ghost = None
        closest_normal_ghost = None
        closest_capsule = None

        # Breadth-first flood from the successor position; the first time a
        # target category is seen, `dist` is its shortest maze distance.
        fringe = [(next_x, next_y, 0)]
        expanded = set()

        while fringe:
            pos_x, pos_y, dist = fringe.pop(0)
            if (pos_x, pos_y) in expanded:
                continue
            expanded.add((pos_x, pos_y))

            if closest_food is None and food[pos_x][pos_y]:
                closest_food = dist

            if closest_normal_ghost is None and (pos_x, pos_y) in normal_ghosts_positions:
                closest_normal_ghost = dist

            if closest_scared_ghost is None and (pos_x, pos_y) in scared_ghosts_positions:
                closest_scared_ghost = dist

            if closest_capsule is None and (pos_x, pos_y) in capsules:
                closest_capsule = dist

            # otherwise spread out from the location to its neighbours
            nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)
            for nbr_x, nbr_y in nbrs:
                fringe.append((nbr_x, nbr_y, dist + 1))

        # Normalize every distance by the board area so it stays below one;
        # larger values would make the weight updates diverge.
        if closest_food is not None:
            features["closest-food"] = float(closest_food) / (walls.width *
                                                              walls.height)

        if closest_normal_ghost is not None:
            features["closest-normal-ghost"] = float(closest_normal_ghost) / (
                walls.width * walls.height)

        if closest_scared_ghost is not None:
            features["closest-scared-ghost"] = float(closest_scared_ghost) / (
                walls.width * walls.height)

        if closest_capsule is not None:
            features["closest-cap"] = float(closest_capsule) / (walls.width *
                                                                walls.height)

        # Count ghosts one and two steps away from the successor position.
        # (The unused `scared_ghost_ratio`, which divided by zero on
        # ghost-free layouts, has been removed.)
        num_normal_ghosts_one_step_away = 0
        num_scared_ghosts_one_step_away = 0
        num_normal_ghosts_two_step_away = 0
        num_scared_ghosts_two_step_away = 0

        for g in normal_ghosts_positions:
            for p in Actions.getLegalNeighbors(g, walls):
                if p == (next_x, next_y):
                    num_normal_ghosts_one_step_away += 1

                for q in Actions.getLegalNeighbors(p, walls):
                    if q == (next_x, next_y):
                        num_normal_ghosts_two_step_away += 1

        for g in scared_ghosts_positions:
            for p in Actions.getLegalNeighbors(g, walls):
                if p == (next_x, next_y):
                    num_scared_ghosts_one_step_away += 1

                for q in Actions.getLegalNeighbors(p, walls):
                    if q == (next_x, next_y):
                        num_scared_ghosts_two_step_away += 1

        features["#-of-ghosts-1-step-away"] = num_normal_ghosts_one_step_away
        features["#-of-sghosts-1-step-away"] = num_scared_ghosts_one_step_away

        features["#-of-ghosts-2-step-away"] = num_normal_ghosts_two_step_away
        features["#-of-sghosts-2-step-away"] = num_scared_ghosts_two_step_away

        # if there is no danger of ghosts then add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        if not features["#-of-sghosts-1-step-away"] and (
            (next_x, next_y) in scared_ghosts_positions) and (
                (next_x, next_y) not in normal_ghosts_positions):
            features["eats-ghost"] = 1.5

        if not features["#-of-ghosts-1-step-away"] and ((next_x, next_y)
                                                        in capsules):
            features["eats-cap"] = 1.0

        features.divideAll(10.0)
        return features
Ejemplo n.º 45
0
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

         As noted in search.py:
             For a given state, this should return a list of triples,
         (successor, action, stepCost), where 'successor' is a
         successor to the current state, 'action' is the action
         required to get there, and 'stepCost' is the incremental
         cost of expanding to that successor
        """
        # State layout: (position, food grid, remaining capsules, visited).
        position, foods, capsule, _ = state
        successors = []
        for action in [
                Directions.NORTH, Directions.SOUTH, Directions.EAST,
                Directions.WEST
        ]:
            x, y = position
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Expand when all capsules are gone, or when the target
                # square holds no food (original note: "if capsule has not
                # been eaten") -- TODO confirm the intended semantics.
                if not capsule or not foods[nextx][nexty]:
                    # Copy food and capsules so successors don't alias each
                    # other, mark the square as eaten, and consume any
                    # capsule located there.
                    newFood = foods.copy()
                    newCapsule = capsule.copy()
                    newFood[nextx][nexty] = False
                    if (nextx, nexty) in capsule:
                        newCapsule.remove((nextx, nexty))
                    nextState = [(nextx, nexty), newFood, newCapsule,
                                 state[3]]
                    cost = self.costFn(nextState)
                    successors.append((nextState, action, cost))

        # Bookkeeping for display purposes
        self._expanded += 1  # DO NOT CHANGE

        return successors
Ejemplo n.º 46
0
 def getActionCoordinates(self, action, previousCoordinates):
     """Return the coordinates reached by applying *action* from *previousCoordinates*."""
     delta = Actions.directionToVector(action)
     return (previousCoordinates[0] + delta[0], previousCoordinates[1] + delta[1])
    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        # For each living ghost, take its most likely position, then keep
        # the one closest to Pacman by maze distance.
        minDis = float("inf")
        minDisPos = None
        for gd in livingGhostPositionDistributions:
            bestProb = 0
            bestPos = None
            for pos, prob in gd.items():
                if prob > bestProb:
                    bestProb = prob
                    bestPos = pos
            d = self.distancer.getDistance(pacmanPosition, bestPos)
            if d < minDis:
                minDis = d
                minDisPos = bestPos

        # Evaluate every legal action exactly once (the original recomputed
        # each successor distance in a second loop), lower minDis to the
        # best achievable successor distance, then collect the actions that
        # achieve it and break ties at random.
        successorDists = [
            (action,
             self.distancer.getDistance(minDisPos,
                                        Actions.getSuccessor(pacmanPosition, action)))
            for action in legal]
        for _, d in successorDists:
            if d < minDis:
                minDis = d

        optimalAction = [action for action, d in successorDists if d == minDis]
        return random.choice(optimalAction)
    def getFeatures(self, state, action):
        """Feature extractor for a ghost agent.

        Two regimes, switched on the ghost team's score:
        once the score drops to -30 or below, ghost 2 is steered toward
        Pacman; otherwise ghost 1 is steered toward the fixed square (1, 9).
        All features are scaled down by 10 before returning.
        """
        features = util.Counter()

        pacman_pos = state.getPacmanPosition()
        walls = state.getWalls()
        step_x, step_y = Actions.directionToVector(action)

        if state.getGhostScore() <= -30:
            # Chase mode: move ghost 2 toward Pacman's current position.
            gx, gy = state.getGhostPosition(2)
            moved = (int(gx + step_x), int(gy + step_y))
            dist_to_pacman = ghostDistance(moved, pacman_pos, walls)
            if dist_to_pacman is not None:
                features["ghost_b_dist"] = (
                    float(dist_to_pacman) / (walls.width * walls.height) * 40)
        else:
            # Positioning mode: move ghost 1 toward the square (1, 9).
            gx, gy = state.getGhostPosition(1)
            moved = (int(gx + step_x), int(gy + step_y))
            dist_to_target = closestCapsule(moved, [(1, 9)], walls)
            if dist_to_target is not None:
                features["ghost_dist"] = (
                    float(dist_to_target) / (walls.width * walls.height))

        # Keep feature magnitudes small so weight updates stay stable.
        features.divideAll(10)
        return features
    def getFeatures(self, state, action):
        """Extract approximate-Q-learning features for Pacman.

        Features: a constant bias, the (normalized) Euclidean distance from
        Pacman's current square to the nearest ghost, whether the action
        safely eats food, and the normalized maze distance to the closest
        food from the successor square.  All values are divided by 10.
        """
        food = state.getFood()
        walls = state.getWalls()
        ghosts = state.getGhostPositions()
        area = walls.width * walls.height

        features = util.Counter()
        features["bias"] = 1.0

        # Pacman's position after taking the action.
        x, y = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # Straight-line distance from the *current* square to each ghost,
        # normalized by the board area; keep the minimum.
        ghost_dists = [LA.norm([x - gx, y - gy]) for gx, gy in ghosts]
        features["min-ghost-dist"] = min(ghost_dists) / area

        # Number of ghosts adjacent to the successor square.
        adjacent_ghosts = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls)
            for g in ghosts)

        # Only credit eating food when no ghost is one step away.
        if not adjacent_ghosts and food[next_x][next_y]:
            features["eats-food"] = 1.0
        else:
            features["eats-food"] = 0

        # Maze distance to the nearest food, kept below one so the weight
        # update does not diverge.
        food_dist = closestFood((next_x, next_y), food, walls)
        if food_dist is not None:
            features["closest-food"] = float(food_dist) / area

        features.divideAll(10.0)
        return features
Ejemplo n.º 50
0
  def getFeatures(self, gameState, action):
    """Feature extractor for a capture-the-flag agent.

    Computes food-distance, invader, ghost-proximity, power-pill, stop /
    reverse, and carry-home features for the successor state reached by
    taking *action*.  Returns a util.Counter of feature values.
    """
    features = util.Counter()

    succ = self.find_succ(gameState, action)
    features['successorScore'] = self.getScore(succ)

    currState = succ.getAgentState(self.index)
    currPos = currState.getPosition()
    X, Y = currPos
    x, y = Actions.directionToVector(action)
    nX, nY = int(X + x), int(Y + y)

    # Agents with index < 2 are treated as the "first" (top-half) agent.
    isFirst = self.index < 2
    isFirstOffending = False
    isSecondOffending = False

    foodList = self.getFood(gameState).asList()

    walls = gameState.getWalls()
    yMax = walls.height / 2

    enemies = [succ.getAgentState(e) for e in self.getOpponents(succ)]

    # Enemy Pacmen on our side of the board.
    invaders = [e for e in enemies if e.isPacman and e.getPosition() != 0]
    # Observable enemy ghosts.
    chasers = [e for e in enemies
               if e.getPosition() is not None and not e.isPacman]

    dist_invaders = [self.getMazeDistance(currPos, inv.getPosition())
                     for inv in invaders if inv.getPosition() is not None]
    pos_invaders = [inv.getPosition()
                    for inv in invaders if inv.getPosition() is not None]

    # BUGFIX: only measure distance to ghosts whose position is observable;
    # the original called getMazeDistance on None positions and crashed.
    dist_ghosts = [self.getMazeDistance(currPos, e.getPosition())
                   for e in enemies
                   if not e.isPacman and e.getPosition() is not None]

    # BUGFIX: initialize the food-distance aggregates unconditionally; the
    # original left them undefined when no food remained, raising NameError
    # where they are read below.
    firstFood, secondFood = [], []
    firstMinDistance = secondMinDistance = foodMinDistance = 0
    if len(foodList) > 0:
      firstMinDistance = secondMinDistance = foodMinDistance = 1e18
      for food in foodList:
        currDist = self.getMazeDistance(currPos, food)
        foodMinDistance = min(foodMinDistance, currDist)
        # Food above 1.5*yMax belongs to the first agent, the rest to the
        # second one.
        if food[1] > 1.5 * yMax:
          firstFood.append(food)
          firstMinDistance = min(firstMinDistance, currDist)
        else:
          secondFood.append(food)
          secondMinDistance = min(secondMinDistance, currDist)

      if len(firstFood) == 0:
        firstMinDistance = 0
      if len(secondFood) == 0:
        secondMinDistance = 0

    if len(invaders) == 0:
      # Nobody is invading: both agents go on the offensive.
      isFirstOffending = True
      isSecondOffending = True
      features['firstEatFood'] = firstMinDistance
      features['secondEatFood'] = secondMinDistance
      features['eatInvader'] = 0

      dist_capsules = [self.getMazeDistance(currPos, capsule)
                       for capsule in gameState.getBlueCapsules()]
      if len(dist_capsules) > 0:
        first2pill = min(dist_capsules)
        second2pill = min(dist_capsules)
      else:
        first2pill = 0
        second2pill = 0

      features['eatPowerPill'] = first2pill if isFirst else second2pill

      for chaser in chasers:
        if chaser.scaredTimer > 0:
          # Scared ghosts are prey: drop food goals and chase them instead.
          features['firstEatFood'] = 0
          features['secondEatFood'] = 0
          features['eatGhost'] = min(dist_ghosts) if dist_ghosts else 0
          features['ghostNearby'] = 0
        else:
          features['eatGhost'] = 0
          features['ghostNearby'] = min(dist_ghosts) if dist_ghosts else 0

    else:
      if isFirst and currState.isPacman:
        features['firstEatFood'] = foodMinDistance
        features['ghostNearby'] = min(dist_ghosts) if len(dist_ghosts) > 0 else 0
        features['secondEatFood'] = 1/(max(dist_invaders)+1)**2 if len(dist_invaders) > 0 else 0
        features['eatInvader'] = 0

        dist_capsules = [self.getMazeDistance(currPos, capsule)
                         for capsule in gameState.getBlueCapsules()]
        first2pill = min(dist_capsules) if len(dist_capsules) > 0 else 0

        # Only go for the pill when it is closer than the nearest ghost.
        features['eatPowerPill'] = first2pill if len(dist_ghosts) > 0 and first2pill < min(dist_ghosts) else 0

        for chaser in chasers:
          if chaser.scaredTimer > 0:
            features['firstEatFood'] = 0
            features['eatGhost'] = min(dist_ghosts) if dist_ghosts else 0
          else:
            features['eatGhost'] = 0
            features['ghostNearby'] = min(dist_ghosts) if len(dist_ghosts) > 0 else 0

      elif isFirst and not currState.isPacman:
        # First agent stays home and defends.
        features['firstEatFood'] = -1
        features['secondEatFood'] = foodMinDistance
        features['eatInvader'] = 0

    # Penalize stopping and reversing direction.
    if action == Directions.STOP:
      features['stop'] = 1
    rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]
    if action == rev:
      features['reverse'] = 1

    # When carrying food, reward heading back across the border.
    xMid = walls.width // 2
    prevState = gameState.getAgentState(self.index)
    if prevState.numCarrying > 0:
      features['return'] = currPos[0] - xMid - 1

    return features
Ejemplo n.º 51
0
    def aStarSearch(self,
                    startPosition,
                    gameState,
                    goalPositions,
                    avoidPositions=[],
                    returnPosition=False):
        """
        Finds the shortest action path from startPosition to the nearest
        position in goalPositions, heavily penalizing squares listed in
        avoidPositions.

        Args:
            startPosition: (x, y) start coordinate.
            gameState: current game state (used only for its wall grid).
            goalPositions: collection of (x, y) target coordinates.
            avoidPositions: squares to treat as nearly impassable.
            returnPosition: when True, also return the goal position reached.

        Returns:
            The action list (and, if requested, the final position), or
            None when no goal is reachable.
        """
        walls = gameState.getWalls()
        width = walls.width
        height = walls.height
        # A set makes the per-step "not a wall" test O(1) instead of a
        # linear scan over every wall square.
        walls = set(walls.asList())

        actions = [
            Directions.NORTH, Directions.SOUTH, Directions.EAST,
            Directions.WEST
        ]
        actionVectors = [
            Actions.directionToVector(action) for action in actions
        ]
        # Change action vectors to integers so they work correctly with indexing
        actionVectors = [
            tuple(int(number) for number in vector) for vector in actionVectors
        ]

        # Values are stored as 3-tuples: (Position, Path, TotalCost)
        currentPosition, currentPath, currentTotal = startPosition, [], 0

        # Priority = cost so far + avoid-square penalty + admissible
        # heuristic (Manhattan distance to the closest goal).
        # BUGFIX: the original lacked parentheses around the penalty term,
        # so conditional-expression precedence made the whole priority
        # "(cost + width*height) if avoided else (0 + heuristic)", dropping
        # the heuristic for avoided squares and the path cost for all others.
        queue = util.PriorityQueueWithFunction(
            lambda entry: entry[2]
            + (width * height if entry[0] in avoidPositions else 0)
            + min(
                util.manhattanDistance(entry[0], endPosition)
                for endPosition in goalPositions))

        # Keeps track of visited positions
        visited = set([currentPosition])

        while currentPosition not in goalPositions:

            possiblePositions = [
                ((currentPosition[0] + vector[0],
                  currentPosition[1] + vector[1]), action)
                for vector, action in zip(actionVectors, actions)
            ]
            legalPositions = [(position, action)
                              for position, action in possiblePositions
                              if position not in walls]

            for position, action in legalPositions:
                if position not in visited:
                    visited.add(position)
                    queue.push(
                        (position, currentPath + [action], currentTotal + 1))

            # Frontier exhausted: no goal is reachable from the start.
            if len(queue.heap) == 0:
                return None
            else:
                currentPosition, currentPath, currentTotal = queue.pop()

        if returnPosition:
            return currentPath, currentPosition
        else:
            return currentPath
Ejemplo n.º 52
0
    def getFlowNetwork(self,
                       gameState,
                       startingPositions=None,
                       endingPositions=None,
                       defenseOnly=True):
        '''
        Returns the flow network.
        If starting positions are provided, also returns the source node
        If ending positions are provided, also returns the sink node
        Note: Always returns tuple
        '''
        # Sentinel vertices that cannot collide with board coordinates.
        source = (-1, -1)
        sink = (-2, -2)

        walls = gameState.getWalls()
        # PERF: sets give O(1) membership tests; the original did O(n) list
        # scans both here and inside the nested edge loop below.
        wallPositions = set(walls.asList())
        possiblePositions = [
            (x, y) for x in range(walls.width) for y in range(walls.height)
            if (x, y) not in wallPositions and (
                not defenseOnly or self.positionIsHome((x, y), walls.width))
        ]
        positionSet = set(possiblePositions)

        actions = [
            Directions.NORTH, Directions.SOUTH, Directions.EAST,
            Directions.WEST
        ]
        actionVectors = [
            Actions.directionToVector(action) for action in actions
        ]
        # Change vectors from float to int
        actionVectors = [
            tuple(int(number) for number in vector) for vector in actionVectors
        ]

        network = FlowNetwork()

        # Add all vertices
        for position in possiblePositions:
            network.AddVertex(position)
        network.AddVertex(source)
        network.AddVertex(sink)

        # Capacity-1 edge between every pair of adjacent open squares.
        edges = EdgeDict()
        for position in possiblePositions:
            for vector in actionVectors:
                newPosition = (position[0] + vector[0],
                               position[1] + vector[1])
                if newPosition in positionSet:
                    edges[(position, newPosition)] = 1

        # The source feeds every starting position with unlimited capacity...
        for position in startingPositions or []:
            edges[(source, position)] = float('inf')

        # ...and every ending position drains into the sink.
        for position in endingPositions or []:
            edges[(position, sink)] = float('inf')

        for edge in edges:
            network.AddEdge(edge[0], edge[1], edges[edge])

        retval = (network, )

        if startingPositions is not None:
            retval = retval + (source, )
        if endingPositions is not None:
            retval = tuple(retval) + (sink, )

        return retval
Ejemplo n.º 53
0
    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (according to mazeDistance!).

        To find the mazeDistance between any two positions, use:
          self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
          successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief
        distributions for each of the ghosts that are still alive.  It
        is defined based on (these are implementation details about
        which you need not be concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()

        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        # Most probable position of each living ghost.
        maxBeliefs = [belief.argMax()
                      for belief in livingGhostPositionDistributions]

        # Target the ghost whose most-likely position carries the highest
        # belief (>= keeps the last ghost on ties, matching the original).
        targetPosition, targetProbability = None, 0
        for i, coordinate in enumerate(maxBeliefs):
            probability = livingGhostPositionDistributions[i][coordinate]
            if probability >= targetProbability:
                targetPosition, targetProbability = coordinate, probability

        # Pick the legal action whose successor square is closest to the
        # target by maze distance; tuple-min breaks ties by action name.
        nextSteps = []
        for action in legal:
            nextPosition = Actions.getSuccessor(pacmanPosition, action)
            nextSteps.append(
                (self.distancer.getDistance(nextPosition, targetPosition),
                 action))

        # FIX: dropped the unreachable util.raiseNotDefined() that followed
        # this return in the original.
        return min(nextSteps)[1]
Ejemplo n.º 54
0
    def chooseAction(self, gameState):
        """
        Chase the nearest living ghost.

        Each surviving ghost's most probable position is read from its
        belief distribution (argMax); the ghost nearest to Pacman by maze
        distance becomes the target, and the legal action whose successor
        square minimizes the maze distance to that target is returned
        (distance ties fall back to lexicographic action order).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]

        # Most probable position for every surviving ghost.
        likelyPositions = [distribution.argMax()
                           for distribution in livingGhostPositionDistributions]

        # Maze distance from Pacman to each of those positions.
        ghostDistances = [self.distancer.getDistance(pacmanPosition, position)
                          for position in likelyPositions]

        # Target the closest ghost.
        target = likelyPositions[ghostDistances.index(min(ghostDistances))]

        # Score every legal action by its successor's distance to the target
        # and take the minimum (tuple comparison breaks ties by action name).
        scored = [(self.distancer.getDistance(
                       Actions.getSuccessor(pacmanPosition, move), target),
                   move)
                  for move in legal]
        return min(scored)[1]
Ejemplo n.º 55
0
    def escapeEnemy(self, gameState):
        """A* search for a route back to safety.

        Builds a wall grid in which squares on and orthogonally adjacent to
        threatening enemies are marked impassable, then searches for the
        nearest position satisfying self.isSafePosition.  Returns the first
        action of the found path, or self.escapeAction when no safe path
        exists.
        """

        from util import PriorityQueue
        openList = PriorityQueue()
        closeList = []
        path = []

        # Work on a copy so the real game state's walls are untouched.
        walls = gameState.getWalls().deepCopy()

        # Block each enemy ghost's square and its orthogonal neighbors,
        # except east/west neighbors sitting on the home-border column
        # self.safeX (those crossings must stay open).
        # NOTE(review): the +/-1 indexing assumes ghosts are never on the
        # outermost squares of the grid — confirm against the layout.
        for a in self.ghosts:
            x, y = a.getPosition()

            walls[int(x)][int(y)] = True
            # print x1, y1
            if not x + 1 == self.safeX:
                walls[int(x + 1)][int(y)] = True

            walls[int(x)][int(y + 1)] = True
            if not x - 1 == self.safeX:
                walls[int(x - 1)][int(y)] = True

            walls[int(x)][int(y - 1)] = True


        # While we are scared, invading enemy Pacmen are lethal too — block
        # their squares and all four neighbors unconditionally.
        if self.myState.scaredTimer > 0:
            for a in self.invaders:
                x, y = a.getPosition()

                # print x1, y1
                walls[int(x)][int(y)] = True
                walls[int(x + 1)][int(y)] = True
                walls[int(x)][int(y + 1)] = True
                walls[int(x - 1)][int(y)] = True
                walls[int(x)][int(y - 1)] = True

        # An invader camped on the border column blocks the crossing square
        # next to it on our side (which side depends on our team color).
        for a in self.invaders:
            x, y = a.getPosition()
            if x == self.safeX:
                if self.red:
                    walls[int(x + 1)][int(y)] = True
                else:
                    walls[int(x - 1)][int(y)] = True

        def findPath(node):
            # Walk parent links back to the start, appending actions in
            # start-to-goal order into the enclosing `path` list.
            if node[1]:  # If it is not a start state
                findPath(node[1])
                path.append(node[2])

        def heruistic3(position1):
            # Heuristic: horizontal distance to the border column, or the
            # Manhattan distance to the nearest capsule if that is smaller.
            x, y = position1
            if self.getCapsules(gameState):
                capsuleD = min([abs(x - c[0]) + abs(y - c[1]) for c in self.getCapsules(gameState)])
                return min(abs(x - self.safeX), capsuleD)
            return abs(x - self.safeX)

        # Node layout: (position, parentNode, actionFromParent, costSoFar).
        myState = gameState.getAgentState(self.index)
        startPosition = myState.getPosition()
        startNode = (startPosition, [], [], 0)
        openList.push(startNode, heruistic3(startPosition))

        # Standard A*: pop the cheapest node, stop at the first safe square.
        while not openList.isEmpty():
            currentNode = openList.pop()
            currentPosition = currentNode[0]
            if currentPosition not in closeList:
                if self.isSafePosition(currentPosition, gameState):
                    findPath(currentNode)
                    return path[0]
                closeList.append(currentPosition)
                for position in Actions.getLegalNeighbors(currentPosition, walls):
                    action = Actions.vectorToDirection(
                        (position[0] - currentPosition[0], position[1] - currentPosition[1]))
                    openList.push((position, currentNode, action, currentNode[3] + 1),
                                  currentNode[3] + 1 + heruistic3(position))

        # No escape route found — fall back to the precomputed escape action.
        return self.escapeAction
Ejemplo n.º 56
0
    def getFeatures(self, state, action):
        """Compute offensive features for evaluating `action` from `state`.

        Returns a util.Counter of feature values, divided by 10 at the end so
        that learned-weight updates stay numerically small.  The danger
        features are mutually exclusive, ordered by severity:
        death > ghost one step away > opponent within four steps > ordinary
        food-seeking features.
        """
        # Aggressive (offensive) features
        features = util.Counter()
        x, y = pos = state.getAgentPosition(self.index)
        successor = state.generateSuccessor(self.index, action)
        agentState = state.getAgentState(self.index)
        # Capsules worth eating are the ones on the opposing side.
        capsules = state.getBlueCapsules(
        ) if self.isOnRedTeam else state.getRedCapsules()
        # Meta data
        walls = state.getWalls()
        # Estimated opponent positions (inferred, not necessarily exact).
        opponents = self.getLikelyOppPosition()
        ghosts = []     # non-scared defenders on their own half
        oppPacmen = []  # opponents invading our half

        # Fill out opponent arrays.  The two branches mirror each other for
        # the red and blue teams: an opponent past the border column is a
        # pacman; otherwise it is a ghost (ignored while its scaredTimer runs).
        if self.isOnRedTeam:
            oppindices = state.getBlueTeamIndices()
            food = state.getBlueFood()
            for i, opp in enumerate(opponents):
                if opp[0] < self.border:
                    oppPacmen.append(opp)
                else:
                    if state.getAgentState(oppindices[i]).scaredTimer == 0:
                        ghosts.append(opp)
            friendPos = state.getAgentPosition(
                [x for x in state.getRedTeamIndices() if x != self.index][0])
            # bothOffside: are both teammates on the opponent's side?
            if pos[0] > self.border and friendPos[0] > self.border:
                bothOffside = True
            else:
                bothOffside = False
        else:
            food = state.getRedFood()
            oppindices = state.getRedTeamIndices()
            for i, opp in enumerate(opponents):
                if opp[0] >= self.border:
                    oppPacmen.append(opp)
                else:
                    if state.getAgentState(oppindices[i]).scaredTimer == 0:
                        ghosts.append(opp)
            friendPos = state.getAgentPosition(
                [x for x in state.getBlueTeamIndices() if x != self.index][0])
            if pos[0] <= self.border and friendPos[0] <= self.border:
                bothOffside = True
            else:
                bothOffside = False

        # Position we would occupy after taking `action`.
        next_x, next_y = self.generateSuccessorPosition(pos, action)

        # count the number of ghosts 1-step away
        ghostsOneStepAway = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls)
            for g in ghosts)

        # count the number of opponents that are 4 steps or fewer away
        oppFourStepsAway = sum(
            1 for ghost in ghosts
            if self.getMazeDistance((next_x, next_y), ghost) <= 4)

        # Only one feature if a ghost killed us
        if (next_x, next_y) in ghosts:
            features['died'] = 1.0
            # Distances are normalized by board area to stay below 1.
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)
        # Only one feature if we're about to die
        elif ghostsOneStepAway >= 1:
            # `ghosts` is non-empty here (the sum counted at least one),
            # so dividing by len(ghosts) is safe.
            features['ghosts-1-step-away'] = float(ghostsOneStepAway) / len(
                ghosts)
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)
        # Only one feature if there are opponents fewer than 4 steps away
        elif oppFourStepsAway >= 1:
            features['opponents-4-steps-away'] = float(oppFourStepsAway) / len(
                ghosts)
            features['distance-from-home'] = float(
                self.getMazeDistance(
                    (next_x, next_y),
                    self.start)) / (walls.width * walls.height)
        # Otherwise, we have regular features
        else:
            # Fewer remaining opponent food pellets is better.
            features['successor-food-count'] = -food.count(True)
            if food[next_x][next_y]:
                features['eats-food'] = 1.0

            # Discourage teammates from bunching up when both are attacking.
            if bothOffside:
                features['distance-to-friend'] = float(
                    self.getMazeDistance(
                        pos, friendPos)) / (walls.width * walls.height)

            dist = self.closestFood((next_x, next_y), food, walls)
            if dist is not None:
                # make the distance a number less than one otherwise the update
                # will diverge wildly
                features['closest-food'] = float(dist) / (walls.width *
                                                          walls.height)

            if len(ghosts) >= 1:
                dists = [
                    self.dist((next_x, next_y), pac, walls) for pac in ghosts
                ]
                features['closest-ghost'] = float(
                    min(dists)) / (walls.width * walls.height)

            # Mild penalties for stopping or reversing direction.
            if action == Directions.STOP: features['stop'] = 1
            rev = Directions.REVERSE[state.getAgentState(
                self.index).configuration.direction]
            if action == rev: features['reverse'] = 1.0

        if (next_x, next_y) in capsules:
            features['eats-capsule'] = 1.0

        # Keep all feature magnitudes small so learning stays stable.
        features.divideAll(10.0)
        return features
Ejemplo n.º 57
0
    def findFood(self, gameState):
        """A*-search toward the closest food that looks safely reachable.

        Builds a copy of the wall grid and plants artificial walls around
        dangers (visible ghosts, invaders while we are scared, and a nearby
        teammate), then runs A* with a Manhattan-distance-to-food heuristic.
        Returns the first action of a path that is judged safe; otherwise
        falls back to an escape action, a switch to defense, or a random
        legal move.

        NOTE(review): wall writes like walls[int(x + 1)][int(y)] assume the
        surrounding cells are in bounds — presumably guaranteed by the layout
        border; confirm against the maps used.
        """
        from util import PriorityQueue
        openList = PriorityQueue()
        closeList = []  # positions already expanded
        path = []       # filled in by findPath() with the action sequence

        # Work on a deep copy so we can add virtual walls without touching
        # the real game state.
        walls = gameState.getWalls().deepCopy()

        myTeam = self.getTeam(gameState)
        for each in myTeam:  # assume myTeam only has 2 indices
            if (each != self.index):  friendIndex = each

        # Block the squares around our teammate when it is near the border
        # (or already a pacman), so both agents don't funnel into the same
        # corridor.
        a, b = gameState.getAgentState(friendIndex).getPosition()
        if (abs(a - self.safeX) < 3 or gameState.getAgentState(friendIndex).isPacman) and not (a, b) == self.position:
            walls[int(a)][int(b)] = True
            walls[int(a - 1)][int(b)] = True
            walls[int(a + 1)][int(b)] = True
            walls[int(a)][int(b - 1)] = True
            walls[int(a)][int(b + 1)] = True

        # Treat each visible ghost and its neighborhood as walls, except for
        # cells on our safe border column (self.safeX), which must stay
        # passable so we can retreat home.
        for a in self.ghosts:
            x, y = a.getPosition()

            # print x1, y1
            walls[int(x)][int(y)] = True
            if not x + 1 == self.safeX:
                walls[int(x + 1)][int(y)] = True

            walls[int(x)][int(y + 1)] = True
            if not x - 1 == self.safeX:
                walls[int(x - 1)][int(y)] = True

            walls[int(x)][int(y - 1)] = True

            # Near the opponent's border column, widen the blocked zone
            # vertically to avoid getting trapped at the crossing.
            if abs(x - (self.mapWidth - 1 - self.safeX)) <= 1:
                if y + 2 < self.mapHeight:
                    walls[int(x)][int(y + 2)] = True
                if y - 2 >= 0:
                    walls[int(x)][int(y - 2)] = True

        # While we are scared, invaders can kill us too — wall them off.
        if self.myState.scaredTimer > 0:
            for a in self.invaders:
                x, y = a.getPosition()

                # print x1, y1
                walls[int(x)][int(y)] = True
                walls[int(x + 1)][int(y)] = True
                walls[int(x)][int(y + 1)] = True
                walls[int(x - 1)][int(y)] = True
                walls[int(x)][int(y - 1)] = True

        # Don't step across the border directly into an invader waiting on
        # our safe column.
        for a in self.invaders:
            x, y = a.getPosition()
            if x == self.safeX:
                if self.red:
                    walls[int(x + 1)][int(y)] = True
                else:
                    walls[int(x - 1)][int(y)] = True

        def findPath(node):
            # Recursively walk the parent chain back to the start node,
            # appending actions in forward order.
            # Node layout: (position, parent, action-from-parent, cost).
            if node[1]:  # If it is not a start state
                findPath(node[1])
                path.append(node[2])

        def heruistic4(position1, gameState):
            # Admissible heuristic: Manhattan distance to the nearest food.
            foodList = self.getFood(gameState).asList()
            if len(foodList) > 0:  # it should not happen
                return min([abs(position1[0] - food[0]) + abs(position1[1] - food[1]) for food in foodList])
            return 9999

        myState = gameState.getAgentState(self.index)
        startPosition = myState.getPosition()
        # Start node has no parent/action and zero path cost.
        startNode = (startPosition, [], [], 0)
        openList.push(startNode, heruistic4(startPosition, gameState))

        # Standard A*: pop the cheapest node; on reaching food, decide
        # whether the path is safe before committing to its first action.
        while not openList.isEmpty():
            currentNode = openList.pop()
            currentPosition = currentNode[0]
            if currentPosition not in closeList:
                if self.isFood(currentPosition, gameState):

                    if currentPosition in self.getCapsules(
                            gameState):  # assume eat capsules can easily get out of hutong
                        if not self.safePosition and self.isDeadRoad(gameState, startPosition, currentPosition):
                            self.safePosition = startPosition
                        findPath(currentNode)
                        return path[0]

                    if self.safePosition:
                        # Called for its side effects on dead-road caching.
                        self.isDeadRoad(gameState, startPosition, currentPosition)
                        if self.allghosts:
                            # Worst-case time for each ghost to reach our safe
                            # spot (its scaredTimer buys us extra time).
                            ghostsDistance = [
                                max(self.getMazeDistance(a.getPosition(), self.safePosition), a.scaredTimer) for a in
                                self.allghosts]
                            # Safe iff we can grab the food and get back to the
                            # safe spot before the fastest ghost arrives.
                            if self.getMazeDistance(startPosition, currentPosition) + self.getMazeDistance(
                                    currentPosition,
                                    self.safePosition) < min(
                                ghostsDistance):
                                findPath(currentNode)
                                return path[0]
                        else:
                            findPath(currentNode)
                            return path[0]
                    else:
                        if self.isPacman:
                            if self.isDeadRoad(gameState, startPosition, currentPosition):
                                # The food lies in a dead end; only enter if we
                                # can make it back out in time.
                                safePosition = startPosition
                                if self.allghosts:
                                    ghostsDistance = [
                                        max(self.getMazeDistance(a.getPosition(), safePosition), a.scaredTimer) for a in
                                        self.allghosts]
                                    if self.getMazeDistance(startPosition, currentPosition) + self.getMazeDistance(
                                            currentPosition,
                                            safePosition) < min(
                                        ghostsDistance):
                                        findPath(currentNode)
                                        self.safePosition = safePosition
                                        return path[0]

                                else:
                                    findPath(currentNode)
                                    self.safePosition = safePosition
                                    return path[0]
                            else:
                                findPath(currentNode)
                                return path[0]
                        else:
                            findPath(currentNode)
                            return path[0]

                closeList.append(currentPosition)
                # Expand neighbors; cost g = currentNode[3] + 1, priority
                # f = g + h.
                for position in Actions.getLegalNeighbors(currentPosition, walls):
                    action = Actions.vectorToDirection(
                        (position[0] - currentPosition[0], position[1] - currentPosition[1]))
                    openList.push((position, currentNode, action, currentNode[3] + 1),
                                  currentNode[3] + 1 + heruistic4(position, gameState))

        # No safe path to food was found: escape, switch to defense if we are
        # ahead, or move randomly as a last resort.
        if self.escapeAction:
            return self.escapeAction
        else:
            global defend
            if self.getScore(gameState) > 0 and not defend:
                self.attack = False
                defend = True
                return self.getDefendAction(gameState)
            else:
                if Actions.getLegalNeighbors(startPosition, walls):
                    return random.choice([Actions.vectorToDirection(
                        (position[0] - startPosition[0], position[1] - startPosition[1])) for position in
                        Actions.getLegalNeighbors(startPosition, walls)])
                else:
                    return Directions.STOP
Ejemplo n.º 58
0
    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that
        has not yet been captured, then chooses an action that brings
        Pacman closer to the closest ghost (in maze distance!).

        To find the maze distance between any two positions, use:
        self.distancer.getDistance(pos1, pos2)

        To find the successor position of a position after an action:
        successorPosition = Actions.getSuccessor(position, action)

        livingGhostPositionDistributions, defined below, is a list of
        util.Counter objects equal to the position belief distributions
        for each of the ghosts that are still alive.  It is defined based
        on (these are implementation details about which you need not be
        concerned):

          1) gameState.getLivingGhosts(), a list of booleans, one for each
             agent, indicating whether or not the agent is alive.  Note
             that pacman is always agent 0, so the ghosts are agents 1,
             onwards (just as before).

          2) self.ghostBeliefs, the list of belief distributions for each
             of the ghosts (including ghosts that are not alive).  The
             indices into this list should be 1 less than indices into the
             gameState.getLivingGhosts() list.

        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [
            beliefs for i, beliefs in enumerate(self.ghostBeliefs)
            if livingGhosts[i + 1]
        ]
        "*** YOUR CODE HERE ***"
        # Most likely position of each living ghost: the argmax of its
        # belief distribution.  (`bestProb` replaces a variable that shadowed
        # the builtin `max`.)
        ghostLocation = []
        for d in livingGhostPositionDistributions:
            bestProb = -1  # probabilities are >= 0, so any entry beats this
            maxSpot = None
            for position, probability in d.items():
                if probability > bestProb:
                    bestProb = probability
                    maxSpot = position
            ghostLocation.append(maxSpot)

        # Successor position of each legal move.
        successors = []
        for move in legal:
            successors.append(
                (move, Actions.getSuccessor(pacmanPosition, move)))

        # Target the ghost whose most-likely position is closest to Pacman
        # in maze distance.
        minDistances = []
        for p in ghostLocation:
            minDistances.append(self.distancer.getDistance(pacmanPosition, p))
        minValue = minDistances.index(min(minDistances))
        ghost = ghostLocation[minValue]

        # Pick the legal action whose successor position is closest to that
        # ghost.
        minDist = None
        move = None
        for succ in successors:
            currentDistance = self.distancer.getDistance(succ[1], ghost)
            # BUG FIX: check `minDist is None` FIRST.  The original wrote
            # `currentDistance < minDist or minDist is None`, which evaluates
            # `currentDistance < None` on the first iteration and raises
            # TypeError under Python 3 (unorderable types).
            if minDist is None or currentDistance < minDist:
                minDist = currentDistance
                move = succ[0]
        return move
Ejemplo n.º 59
0
    def getFeatures(self, state, action, agent):
        """Compute Q-learning features for `agent` taking `action` in `state`.

        Feature names (except "bias" and "#-of-enemies-1-step-away") are
        suffixed with the agent index so each agent can learn its own
        weights.  Distance features are scaled so they stay roughly within
        [0, 1], keeping weight updates numerically small.

        Changes vs. the original: removed leftover debug prints, removed an
        unused (and expensive) `state.generateSuccessor` call, renamed a
        comprehension variable that shadowed the `agent` parameter, and
        translated the comments to English.
        """
        # extract the grid of food and wall locations and get the enemy
        # (blue-team) positions
        food = state.getBlueFood()
        walls = state.getWalls()
        blue_team = state.getBlueTeamIndices()
        enemies = [state.getAgentPosition(index) for index in blue_team]

        features = util.Counter()

        features["bias"] = 1.0

        # compute the location of the agent after it takes the action
        x, y = state.getAgentPosition(agent)
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # Distance to the closest visible enemy ghost, scaled to at most 1.
        # Defaults to 1 ("far away") when no enemy ghost is visible or the
        # closest one is more than 10 squares off, so the learned weight can
        # reward keeping this value large.
        features['GhostDistance' + str(agent)] = 1
        opponentsState = []
        for i in state.getBlueTeamIndices():
            opponentsState.append(state.getAgentState(i))
        visible = [op for op in opponentsState if not op.isPacman]
        if len(visible) > 0:
            # NOTE: loop variable renamed from `agent`, which shadowed the
            # method's `agent` parameter inside the comprehension.
            positions = [opp.getPosition() for opp in visible]
            closest = min(positions,
                          key=lambda xx: manhattanDistance((x, y), xx))
            closestDist = manhattanDistance((x, y), closest)
            if closestDist <= 10:
                # divide by 10 so the feature stays below 1
                features['GhostDistance' + str(agent)] = closestDist / 10

        # count the number of enemies 1-step away
        features["#-of-enemies-1-step-away"] += sum(
            (next_x, next_y) in Actions.getLegalNeighbors(e, walls)
            for e in enemies)

        # food feature: the next square holds a pellet we can eat
        if food[next_x][next_y]:
            features["eats-food" + str(agent)] += 1.0

        dist = closestFood((next_x, next_y), food, walls)
        if dist is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food" +
                     str(agent)] = 1 - 10 * float(dist) / (walls.width *
                                                           walls.height)

        # How many pellets this agent is currently carrying home.
        features["carryingFood" +
                 str(agent)] = state.getAgentState(agent).numCarrying

        # Manhattan distance to the nearest enemy capsule, divided by 100 so
        # that 0 <= feature < 1 (0 when no capsules remain).
        capsulesChasing = state.getBlueCapsules()
        capsulesChasingDistances = [
            manhattanDistance((x, y), capsule) for capsule in capsulesChasing
        ]
        minCapsuleDistance = min(capsulesChasingDistances) if len(
            capsulesChasingDistances) else 0
        features[
            "distanceToCapsule" +
            str(agent)] = minCapsuleDistance / 100

        # Distance to the nearest invading enemy pacman, inverted and scaled
        # so that closer invaders yield larger values in (0, 0.1].
        enemiesAgents = [
            state.getAgentState(i) for i in state.getBlueTeamIndices()
        ]
        invaders = [a for a in enemiesAgents if a.isPacman]
        if len(invaders) > 0:
            dists = [
                manhattanDistance((x, y), a.getPosition()) for a in invaders
            ]
            features[
                'invaderDistance' +
                str(agent)] = 1 - min(dists) / 100
            features['invaderDistance' + str(agent)] /= 10

        # Feature 'scores<agent>': 1.0 iff taking the action raises the
        # score on the very next move, else 0.0.
        nextState = state.generateSuccessor(agent, action)
        features['scores' + str(agent)] = 1.0 if nextState.getScore(
        ) > state.getScore() else 0.0

        return features
Ejemplo n.º 60
0
 def get_legal_actions(state):
     """Return the list of actions Pacman may legally take from *state*."""
     pacman_config = state.get_pacman_state().configuration
     maze_walls = state.data.layout.walls
     return Actions.get_possible_actions(pacman_config, maze_walls)