Example #1
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generatePacmanSuccessor(action)

        # Useful information you can extract.
        newPosition = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        oldFood = currentGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        distance = 0

        # If this move does not eat a pellet, add the distance to the nearest remaining food.
        if newFood.count() == oldFood.count():
            distance = manhattan(newFood.asList()[0], newPosition)
            for pos in newFood.asList():
                if manhattan(pos, newPosition) < distance:
                    distance = manhattan(pos, newPosition)

        # Penalize nearby ghosts exponentially: each step closer doubles the penalty.
        for ghost in newGhostStates:
            distance += 2 ** (2 - manhattan(ghost.getPosition(), newPosition))

        return -distance
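The ghost term above grows exponentially as a ghost closes in, so a single adjacent ghost outweighs any food incentive. A minimal, self-contained sketch of that shaping (the helper name is illustrative, not part of pacai):

def ghostPenalty(d):
    # Mirrors the 2 ** (2 - d) term above, where d is the manhattan distance to a ghost.
    return 2 ** (2 - d)

for d in [5, 2, 1, 0]:
    print(d, ghostPenalty(d))  # 0.125, 1, 2, 4: distant ghosts barely matter, adjacent ones dominate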
Example #2
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generatePacmanSuccessor(action)

        # Useful information you can extract.
        # newPosition = successorGameState.getPacmanPosition()
        # oldFood = currentGameState.getFood()
        # newGhostStates = successorGameState.getGhostStates()
        # newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

        newPosition = successorGameState.getPacmanPosition()
        newGhostStates = successorGameState.getGhostStates()
        # Constants below were tuned by trial and error.
        # Average manhattan distance from the new position to all ghosts.
        ghostDistance = sum([
            manhattan(newPosition, ghostState.getPosition())
            for ghostState in newGhostStates
        ]) / (currentGameState.getNumAgents() - 1)

        newFood = successorGameState.getFood()
        foodDistances = [
            manhattan(newPosition, food) for food in newFood.asList()
        ] + [10000]

        # Reward a high score and nearby food; penalize nearby ghosts.
        return successorGameState.getScore() + 10 / (min(foodDistances) + 0.0001) - \
            10 / (ghostDistance + 0.0001)
    def offenseEvaluationFunction(self, currentGameState, action):

        successorGameState = currentGameState.generateSuccessor(self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        newGhostStates = self.getOppGhostState(successorGameState)
        newGhostPositions = self.getOppGhost(successorGameState)
        oldOppPacmanPositions = self.getOppPacman(currentGameState)
        score = currentGameState.getScore() * 100  # weighted current score
        closestDistToFood = float('-inf')
        if newPosition in oldOppPacmanPositions:
            return 10000
        if self.onOppositeSide(newPosition, successorGameState):
            score += 100
        for food in oldFood.asList():
            dist = distance.maze(newPosition, food, currentGameState)
            if dist == 0:  # check if the new position is food
                score += 1000
            else:  # add weighted reciprocal distance to food (closer food = more points)
                score += (1 / dist) * 100
        # Calculate the ghost score.
        eat_ghost = 1  # coefficient that flips the sign if ghosts are edible
        for i in range(len(newGhostPositions)):
            if newGhostStates[i].getScaredTimer() > 0:  # if ghosts are edible, invert the weight
                eat_ghost = -0.1
            if distance.manhattan(newGhostPositions[i], newPosition) <= 1:  # run away from nearby ghosts
                score -= 50 * eat_ghost
            else:  # subtract weighted reciprocal ghost distance (closer ghost = fewer points)
                score -= (1 / distance.manhattan(newGhostPositions[i], newPosition)) * 50 * eat_ghost
        return score
    def ABEvaluationFunction(self, currentGameState):

        food = self.getOppFood(currentGameState).asList()

        minManhattanFood = None
        minFoodDist = 999999

        for f in food:
            curDist = distance.manhattan(f, self.getPosition(currentGameState))
            if curDist < minFoodDist:
                minFoodDist = curDist
                minManhattanFood = f

        features = {
            "closestDistToFood":
            distance.maze(minManhattanFood, self.getPosition(currentGameState),
                          currentGameState),
            "closestDistToGhost":
            min([
                distance.manhattan(self.getPosition(currentGameState), ghost)
                for ghost in self.getOppGhost(currentGameState)
            ])
        }

        weights = {"closestDistToFood": -1, "closestDistToGhost": 100}

        return sum(
            [features[feat] * weights[feat] for feat in features.keys()])
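Several of these evaluation functions score a state as a weighted sum of named features, as in the dictionary-based return above. A self-contained sketch of that pattern with illustrative numbers (not taken from any particular example):

def linearEvaluation(features, weights):
    # Dot product of two dicts keyed by feature name.
    return sum(features[name] * weights[name] for name in features)

features = {'closestDistToFood': 3, 'closestDistToGhost': 6}
weights = {'closestDistToFood': -1, 'closestDistToGhost': 100}
print(linearEvaluation(features, weights))  # 3 * -1 + 6 * 100 = 597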
Example #5
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function.

    DESCRIPTION: Nearly the same as the reflex evaluation function, but with
    modified constants, plus food and capsule distance terms.
    """

    oldFood = currentGameState.getFood()
    foodDistances = [
        manhattan(currentGameState.getPacmanPosition(), food)
        for food in oldFood.asList()
    ] + [10000]
    minDist = min(foodDistances)
    newGhostStates = currentGameState.getGhostStates()
    ghostDistance = sum([
        manhattan(currentGameState.getPacmanPosition(),
                  ghostState.getPosition()) for ghostState in newGhostStates
    ]) / (currentGameState.getNumAgents() - 1)

    capsules = currentGameState.getCapsules()
    capsuleDistances = [
        manhattan(currentGameState.getPacmanPosition(), capsule)
        for capsule in capsules
    ] + [10000]
    minCapDist = min(capsuleDistances)

    return 100 * currentGameState.getScore() + 10 / (minDist + 0.001) +\
        10 / (ghostDistance + 0.001) + 100 / (minCapDist + 0.001)
Example #6
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generatePacmanSuccessor(action)

        # Useful information you can extract.
        newPosition = successorGameState.getPacmanPosition()
        oldFood = currentGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

        # *** Your Code Here ***
        x = oldFood.getWidth()
        y = oldFood.getHeight()
        maxlen = distance.manhattan((x, y), (0, 0))
        halflen = maxlen
        minfooddist = maxlen
        baddist = maxlen
        badflag = True

        # Distance to the nearest remaining food pellet.
        for i in range(x):
            for j in range(y):
                m = oldFood[i][j]
                if m:
                    d = distance.manhattan((i, j), newPosition)
                    if d < minfooddist:
                        minfooddist = d

        # Distance to the nearest non-scared ("bad") ghost.
        for i in range(len(newScaredTimes)):
            ghost = newGhostStates[i]
            d = distance.manhattan(newPosition, ghost.getPosition())
            if newScaredTimes[i] == 0:
                badflag = False
                if d < baddist:
                    baddist = d

        if baddist == 0:
            return -100
        if badflag:
            baddist = 1
        elif baddist > halflen:
            baddist = 1
        ev = (-1 / baddist) + (successorGameState.getScore() / 2)
        if minfooddist == 0:
            ev += 10
        else:
            ev += (2 / minfooddist)
        if badflag:
            ev += 1
        elif baddist > halflen:
            ev += 1
        return ev
Example #7
    def ABEvaluationFunction(self, currentGameState):

        food = self.getOppFood(currentGameState).asList()

        minManhattanFood = None
        minFoodDist = 999999

        for f in food:
            curDist = distance.manhattan(f, self.getPosition(currentGameState))
            if curDist < minFoodDist:
                minFoodDist = curDist
                minManhattanFood = f

        currentPos = self.getPosition(currentGameState)
        numWalls = self.getNumWalls(currentPos, currentGameState)
        # print("Num Walls =", numWalls)
        ateCapsule = self.oppIsScared(currentGameState)
        ghostDistMultiplier = 1
        if ateCapsule:
            # print ("ate capsule")
            ghostDistMultiplier = -2

        closestDistToGhost = 0

        try:
            closestDistToGhost = min([
                distance.manhattan(self.getPosition(currentGameState), ghost)
                for ghost in self.getOppGhost(currentGameState)
            ])
        except ValueError:
            # No opposing ghosts are visible; keep the default of 0.
            pass

        if closestDistToGhost > 10:
            return -10000

        features = {
            "closestDistToFood":
            distance.maze(minManhattanFood, self.getPosition(currentGameState),
                          currentGameState),
            # Reuse the guarded value computed above instead of re-taking min()
            # over a possibly empty ghost list.
            "closestDistToGhost": closestDistToGhost,
            "surroundingWalls": numWalls
        }

        weights = {
            "closestDistToFood": -50,
            "closestDistToGhost": 100 * ghostDistMultiplier,
            "surroundingWalls": -100
        }

        return sum(
            [features[feat] * weights[feat] for feat in features.keys()])
Example #8
    def applyAction(state, action, agentIndex):
        """
        Edits the state to reflect the results of the action.
        """

        legal = AgentRules.getLegalActions(state, agentIndex)
        if (action not in legal):
            raise ValueError('Illegal action: ' + str(action))

        agentState = state.getAgentState(agentIndex)

        # Update position.
        vector = Actions.directionToVector(action, AgentRules.AGENT_SPEED)
        agentState.updatePosition(vector)

        # Eat.
        nextPosition = agentState.getPosition()
        nearest = nearestPoint(nextPosition)
        if (agentState.isPacman() and manhattan(nearest, nextPosition) <= 0.9):
            AgentRules.consume(nearest, state, state.isOnRedTeam(agentIndex))

        # Potentially change agent type.
        if (nextPosition == nearest):
            # Agents are pacmen when they are not on their own side.
            position = agentState.getPosition()
            agentState.setIsPacman(state.isOnRedTeam(agentIndex) != state.isOnRedSide(position))
Example #9
    def checkDeath(state, agentIndex):
        agentState = state.getAgentState(agentIndex)

        if (state.isOnRedTeam(agentIndex)):
            teamPointModifier = 1
            otherTeam = state.getBlueTeamIndices()
        else:
            teamPointModifier = -1
            otherTeam = state.getRedTeamIndices()

        for otherAgentIndex in otherTeam:
            otherAgentState = state.getAgentState(otherAgentIndex)

            # Ignore agents with a matching type (e.g. two ghosts).
            if (agentState.isPacman() == otherAgentState.isPacman()):
                continue

            otherPosition = otherAgentState.getPosition()

            # Ignore other agents that are too far away.
            if (otherPosition is None
                    or manhattan(otherPosition, agentState.getPosition()) > COLLISION_TOLERANCE):
                continue

            # If we are a brave ghost or they are a scared ghost, then we will eat them.
            # Otherwise, we are being eaten.
            if (agentState.isBraveGhost() or otherAgentState.isScaredGhost()):
                state.addScore(teamPointModifier * KILL_POINTS)
                otherAgentState.respawn()
            else:
                state.addScore(teamPointModifier * -KILL_POINTS)
                agentState.respawn()
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem;
    i.e. it should be admissible.
    (You need not worry about consistency for this heuristic to receive full credit.)
    """

    # Useful information.
    corners = problem.corners  # These are the corner coordinates
    currentPosition = state[0]
    runningPerimeter = 0
    unvisitedCorners = set(corners) - state[1]
    # min() picks the tuple with the smallest first element (the distance),
    # from a list comprehension over all remaining corner distances.
    while len(unvisitedCorners) != 0:
        (dist, cornerVisited) = min([
            (distance.manhattan(currentPosition, corner), corner)
            for corner in unvisitedCorners
        ])
        unvisitedCorners.remove(cornerVisited)
        runningPerimeter += dist
        currentPosition = cornerVisited

    return runningPerimeter
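The heuristic above repeatedly hops to the nearest unvisited corner and sums the manhattan legs. A standalone sketch of the same greedy walk over plain (x, y) tuples (helper names are illustrative, not part of pacai):

def greedyWalkLength(start, points):
    # Repeatedly move to the closest remaining point (by manhattan distance), summing each leg.
    remaining = set(points)
    position = start
    total = 0
    while remaining:
        dist, nearest = min(
            (abs(position[0] - p[0]) + abs(position[1] - p[1]), p) for p in remaining)
        remaining.remove(nearest)
        total += dist
        position = nearest
    return total

print(greedyWalkLength((3, 3), [(1, 1), (1, 6), (6, 6), (6, 1)]))  # 4 + 5 + 5 + 5 = 19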
Example #11
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function.

    DESCRIPTION: Pacman is penalized based on the distance to the closest food
    and the number of remaining food pellets, and should grab every capsule it can.
    """
    # Useful information you can extract.
    score = currentGameState.getScore()
    pacPosition = currentGameState.getPacmanPosition()
    foodList = currentGameState.getFood().asList()
    n = 0
    closestFoodDistance = 10000
    for food in foodList:
        distanceOfFood = distance.manhattan(pacPosition, food)
        if closestFoodDistance > distanceOfFood:
            closestFoodDistance = distanceOfFood
        n = n + 1
    if n != 0:
        # Reward having few pellets left and being close to the nearest one.
        score = score + 1000 / (1 + float(n))
        score = score - (closestFoodDistance * 2)
    else:
        # All food has been eaten.
        score = score + 10000
    capsuleList = currentGameState.getCapsules()
    score = score - (10 * len(capsuleList))
    return score
Example #12
def manhattan(position, problem):
    """
    This heuristic is the manhattan distance to the goal.
    """

    position1 = position
    position2 = problem.goal

    return distance.manhattan(position1, position2)
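distance.manhattan here comes from pacai; for reference, an equivalent self-contained definition is just the sum of absolute coordinate differences:

def manhattanDistance(position1, position2):
    # |x1 - x2| + |y1 - y2|
    return abs(position1[0] - position2[0]) + abs(position1[1] - position2[1])

print(manhattanDistance((1, 2), (4, 6)))  # 7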
Example #13
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function.

    DESCRIPTION: Calculate the sum of distances to the food and to the ghosts,
                 weighting the ghost threat more heavily than the food, and add a
                 penalty for leftover food. Subtract these from the original score.

    """
    position = currentGameState.getPacmanPosition()
    foods = currentGameState.getFood().asList()
    foodDist = 0
    for food in foods:
        foodDist += 2 * manhattan(position, food)
    ghostDist = 0
    for ghost in currentGameState.getGhostPositions():
        ghostDist += 4 * manhattan(position, ghost)
    penalty = -6 * len(foods)  # penalty for leftover food
    return currentGameState.getScore() - foodDist - ghostDist + penalty
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        newGhostStates = self.getOppGhostState(successorGameState)
        newGhostPositions = self.getOppGhost(successorGameState)

        score = currentGameState.getScore() * 100  # weighted current score
        closestDistToFood = float('-inf')
        if self.onOppositeSide(newPosition, successorGameState):
            score += 100
        for food in oldFood.asList():
            dist = distance.maze(newPosition, food, currentGameState)
            if dist == 0:  # check if the new position is food
                score += 1000
            else:  # add weighted reciprocal distance to food (closer food = more points)
                score += (1 / dist) * 100
        # Calculate the ghost score.
        eat_ghost = 1  # coefficient that flips the sign if ghosts are edible
        for i in range(len(newGhostPositions)):
            if newGhostStates[i].getScaredTimer() > 0:
                # If ghosts are edible, invert the weight.
                eat_ghost = -0.1
            if distance.manhattan(newGhostPositions[i], newPosition) <= 1:
                # Run away from nearby ghosts.
                score -= 50 * eat_ghost
            else:
                # Subtract weighted reciprocal ghost distance (closer ghost = fewer points).
                score -= (1 / distance.manhattan(newGhostPositions[i], newPosition)) * 50 * eat_ghost
        return score
    def isOppClose(self, currentGameState):
        myPos = currentGameState.getAgentPosition(self.index)
        OppPositions = self.getOppGhost(currentGameState)
        OppPacmanPos = self.getOppPacman(currentGameState)
        if (len(OppPacmanPos) != 0):
            if (self.onMySide(self.teammatePos, currentGameState)
                    and self.onMySide(myPos, currentGameState)):
                for i in OppPositions:
                    if (distance.manhattan(myPos, i) <=
                            (distance.manhattan(i, self.teammatePos) + 30)):
                        return True
            else:
                return True
        for i in OppPositions:
            if self.isRed:
                if self.mid_x_coord <= (i[0] - 20):
                    return True
            else:
                if self.mid_x_coord >= (i[0] + 20):
                    return True
        return False
Example #16
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.
    First, try to come up with an admissible heuristic;
    almost all admissible heuristics will be consistent as well.

    If using A* ever finds a solution that is worse than what uniform cost search finds,
    your heuristic is *not* consistent, and probably not admissible!
    On the other hand, inadmissible or inconsistent heuristics may find optimal solutions,
    so be careful.

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid is a
    `pacai.core.grid.Grid` of either True or False.
    You can call `foodGrid.asList()` to get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, `problem.walls` gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use.
    For example, if you only want to count the walls once and store that value, try:
    ```
    problem.heuristicInfo['wallCount'] = problem.walls.count()
    ```
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount'].
    """
    position, foodGrid = state
    foodList = foodGrid.asList()
    counter = 0
    foodToVisit = []
    x = 0
    if len(foodList) == 0:
        return 0
    # Every pellet index still needs a visit.
    while len(foodList) != x:
        foodToVisit.append(x)
        x += 1
    # Greedily walk to the closest remaining pellet until none are left.
    while len(foodToVisit) != 0:
        foodDistances = []
        for food in foodToVisit:
            foodDistances.append(
                (food, distance.manhattan(position, foodList[food])))
        # Bubble sort by distance so the closest pellet ends up first.
        for iterator in range(0, len(foodDistances) - 1):
            for a in range(0, len(foodDistances) - 1):
                temp = foodDistances[a + 1]
                if foodDistances[a][1] > temp[1]:
                    foodDistances[a + 1] = foodDistances[a]
                    foodDistances[a] = temp
        counter += foodDistances[0][1]
        position = foodList[foodDistances[0][0]]
        foodToVisit.remove(foodDistances[0][0])
    return counter - 4
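The bubble sort above is only used to find the closest pellet; Python's built-in min over (distance, index) pairs gives the same result in one call. A self-contained sketch of that simplification (the values are illustrative):

position = (0, 0)
foodList = [(3, 4), (1, 1), (5, 0)]
dist, index = min(
    (abs(position[0] - f[0]) + abs(position[1] - f[1]), i)
    for i, f in enumerate(foodList))
print(dist, foodList[index])  # 2 (1, 1)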
Example #17
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem;
    i.e. it should be admissible.
    (You need not worry about consistency for this heuristic to receive full credit.)
    """

    # Useful information.
    # corners = problem.corners  # These are the corner coordinates
    # walls = problem.walls  # These are the walls of the maze, as a Grid.

    # position = state[0]
    # cornersToVisit = []
    # cTest = state[1]
    # for x in range(0, 4):
    #     if cTest[x] is False:
    #         cornersToVisit.append(x)
    # counter = 0
    # for a in range (0, len(cornersToVisit)):
    #     counter += distance.manhattan(position, problem.corners[cornersToVisit[a]])
    #     position = problem.corners[cornersToVisit[a]]
    # return counter

    counter = 0
    cornersToVisit = []
    position = state[0]
    cTest = state[1]
    for x in range(0, 4):
        if cTest[x] is False:
            cornersToVisit.append(x)
    while len(cornersToVisit) != 0:
        cornerDistances = []
        for corner in cornersToVisit:
            cornerDistances.append(
                (corner, distance.manhattan(position,
                                            problem.corners[corner])))
        for iterator in range(0, len(cornerDistances) - 1):
            for a in range(0, len(cornerDistances) - 1):
                temp = cornerDistances[a + 1]

                if cornerDistances[a][1] >= temp[1]:
                    cornerDistances[a + 1] = cornerDistances[a]
                    cornerDistances[a] = temp
        counter += cornerDistances[0][1]
        position = problem.corners[cornerDistances[0][0]]
        cornersToVisit.remove(cornerDistances[0][0])
    return counter
Example #18
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generatePacmanSuccessor(action)

        # Useful information you can extract.
        newPosition = successorGameState.getPacmanPosition()
        oldFood = currentGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        # newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]
        score = 0
        closestFoodDistance = -1
        for food in oldFood.asList():
            distanceOfFood = distance.manhattan(food, newPosition)
            if closestFoodDistance == -1 or closestFoodDistance >= distanceOfFood:
                closestFoodDistance = distanceOfFood
        score += 10 / (1 + float(closestFoodDistance))

        if currentGameState.getPacmanPosition() == newPosition:
            score -= 5

        if newPosition in currentGameState.getCapsules():
            score += 100

        for ghostState in newGhostStates:
            distanceOfGhost = distance.manhattan(ghostState.getPosition(),
                                                 newPosition)
            if distanceOfGhost <= 2:
                score -= 100
        return score + successorGameState.getScore()
Example #19
    def getDistribution(self, state):
        # Read variables from state.
        ghostState = state.getGhostState(self.index)
        legalActions = state.getLegalActions(self.index)
        pos = state.getGhostPosition(self.index)
        isScared = ghostState.isScared()

        speed = 1
        if (isScared):
            speed = 0.5

        actionVectors = [
            Actions.directionToVector(a, speed) for a in legalActions
        ]
        newPositions = [(pos[0] + a[0], pos[1] + a[1]) for a in actionVectors]
        pacmanPosition = state.getPacmanPosition()

        # Select best actions given the state.
        distancesToPacman = [
            distance.manhattan(pos, pacmanPosition) for pos in newPositions
        ]
        if (isScared):
            bestScore = max(distancesToPacman)
            bestProb = self.prob_scaredFlee
        else:
            bestScore = min(distancesToPacman)
            bestProb = self.prob_attack

        zipActions = zip(legalActions, distancesToPacman)
        bestActions = [
            action for action, distance in zipActions if distance == bestScore
        ]

        # Construct distribution.
        dist = counter.Counter()

        for a in bestActions:
            dist[a] = float(bestProb) / len(bestActions)

        for a in legalActions:
            dist[a] += float(1 - bestProb) / len(legalActions)

        dist.normalize()
        return dist
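The distribution above gives each best action an equal share of bestProb and spreads the remaining probability uniformly over all legal actions before normalizing. A plain-dict sketch of the final normalization step, assuming counter.Counter.normalize simply rescales the values to sum to 1 (an assumption, not pacai's verified implementation):

def normalize(weights):
    # Rescale a dict of non-negative weights so its values sum to 1.
    total = sum(weights.values())
    if total == 0:
        return dict(weights)
    return {key: value / total for key, value in weights.items()}

print(normalize({'North': 3, 'South': 1}))  # {'North': 0.75, 'South': 0.25}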
Example #20
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.
    First, try to come up with an admissible heuristic;
    almost all admissible heuristics will be consistent as well.

    If using A* ever finds a solution that is worse than what uniform cost search finds,
    your heuristic is *not* consistent, and probably not admissible!
    On the other hand, inadmissible or inconsistent heuristics may find optimal solutions,
    so be careful.

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid is a
    `pacai.core.grid.Grid` of either True or False.
    You can call `foodGrid.asList()` to get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, `problem.walls` gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use.
    For example, if you only want to count the walls once and store that value, try:
    ```
    problem.heuristicInfo['wallCount'] = problem.walls.count()
    ```
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount'].
    """
    position, foodGrid = state

    # *** Your Code Here ***
    # Return the manhattan distance to the farthest remaining food pellet.
    i = 0
    maxDist = 0
    for x in foodGrid:
        j = 0
        for y in x:
            if y:
                d = distance.manhattan((i, j), position)
                if d > maxDist:
                    maxDist = d
            j += 1
        i += 1
    return maxDist
Example #21
    def applyAction(state, action):
        """
        Edits the state to reflect the results of the action.
        """

        legal = PacmanRules.getLegalActions(state)
        if (action not in legal):
            raise ValueError('Illegal pacman action: ' + str(action))

        pacmanState = state.getPacmanState()

        # Update position.
        vector = Actions.directionToVector(action, PacmanRules.PACMAN_SPEED)
        pacmanState.updatePosition(vector)

        # Eat.
        nextPosition = pacmanState.getPosition()
        nearest = nearestPoint(nextPosition)
        if (manhattan(nearest, nextPosition) <= 0.5):
            # Remove food
            PacmanRules.consume(nearest, state)
Example #22
    def getDistance(self, pos1, pos2):
        """
        The only function you will need after you create the object.
        """

        if (self._distances is None):
            return manhattan(pos1, pos2)

        if isInt(pos1) and isInt(pos2):
            return self.getDistanceOnGrid(pos1, pos2)

        pos1Grids = getGrids2D(pos1)
        pos2Grids = getGrids2D(pos2)
        bestDistance = DEFAULT_DISTANCE

        for pos1Snap, snap1Distance in pos1Grids:
            for pos2Snap, snap2Distance in pos2Grids:
                gridDistance = self.getDistanceOnGrid(pos1Snap, pos2Snap)
                distance = gridDistance + snap1Distance + snap2Distance
                if bestDistance > distance:
                    bestDistance = distance

        return bestDistance
Example #23
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem;
    i.e. it should be admissible.
    (You need not worry about consistency for this heuristic to receive full credit.)
    """

    # Useful information.
    # corners = problem.corners  # These are the corner coordinates
    # walls = problem.walls  # These are the walls of the maze, as a Grid.

    # *** Your Code Here ***

    # Search nodes expanded: 928 #
    counter = 0
    position = state[0]
    cTest = state[1]
    cornersToVisit = []
    for x in range(0, 4):
        if cTest[x] is False:
            cornersToVisit.append(x)

    distances = [0, 0, 0, 0]

    # Repeatedly take the closest unvisited corner, summing manhattan distances.
    for a in range(0, len(cornersToVisit)):
        for corner in cornersToVisit:
            distances[corner] = distance.manhattan(position, problem.corners[corner])
        closest = -1
        for corner in cornersToVisit:
            if closest == -1 or distances[closest] > distances[corner]:
                closest = corner
        cornersToVisit.remove(closest)
        counter += distances[closest]
    return counter
    def chooseAction(self, state):
        """
        alpha-beta pruning algorithm
        (esentially minimax with additional alpha, beta conditions)
        """

        ghostDists = [
            distance.manhattan(self.getPosition(state), ghost)
            for ghost in self.getOppGhost(state)
        ]

        if len(ghostDists) > 0 and min(ghostDists) > 2:
            legalMoves = state.getLegalActions(self.index)
            if Directions.STOP in legalMoves:
                legalMoves.remove(Directions.STOP)

            # Choose one of the best actions.
            scores = [
                self.offenseEvaluationFunction(state, action)
                for action in legalMoves
            ]
            bestScore = max(scores)
            bestIndices = [
                index for index in range(len(scores))
                if scores[index] == bestScore
            ]
            chosenIndex = random.choice(
                bestIndices)  # Pick randomly among the best.

            return legalMoves[chosenIndex]

        print("alpha beta")

        # passing in alpha = -inf and beta = inf
        value, move = self.maxValue(state, 4, float('-inf'), float('inf'))
        # print("move: ", move + " value: ", value)
        return move
Example #25
    def chooseAction(self, state):
        """
        alpha-beta pruning algorithm
        (esentially minimax with additional alpha, beta conditions)
        """
        ghostDists = [
            distance.manhattan(self.getPosition(state), ghost)
            for ghost in self.getOppGhost(state)
        ]

        if len(ghostDists) > 0 and min(ghostDists) > 5:
            legalMoves = state.getLegalActions(self.index)
            if Directions.STOP in legalMoves:
                legalMoves.remove(Directions.STOP)

            # Choose one of the best actions.
            scores = [
                self.offenseEvaluationFunction(state, action)
                for action in legalMoves
            ]
            bestScore = max(scores)
            bestIndices = [
                index for index in range(len(scores))
                if scores[index] == bestScore
            ]
            chosenIndex = random.choice(
                bestIndices)  # Pick randomly among the best.

            return legalMoves[chosenIndex]

        # print("alpha beta")

        # maxDepth = 2
        # numAgents = 4
        # evalFunc = self.ABEvaluationFunction
        #
        # def alphabeta(state, depth, agentIndex, alpha, beta):
        #     legalMoves = state.getLegalActions(agentIndex)
        #     if Directions.STOP in legalMoves:
        #         legalMoves.remove(Directions.STOP)
        #     if depth == maxDepth or len(legalMoves) == 0:
        #         return evalFunc(state), Directions.STOP
        #     print(legalMoves)
        #     v2, move = (None, None)
        #     nextStates = [state.generateSuccessor(agentIndex, move) for move in legalMoves]
        #     if agentIndex == 0:
        #         v = -99999
        #         for nextState, nextAction in zip(nextStates, legalMoves):
        #             v2, _ = alphabeta(nextState, depth, 1, alpha, beta)
        #             if v2 > v:
        #                 v, move = v2, nextAction
        #                 alpha = max(alpha, v)
        #             if v >= beta:
        #                 return v, move
        #         return v, move
        #     elif agentIndex == numAgents - 1:
        #         v = 99999
        #         for nextState in nextStates:
        #             v2, action = alphabeta(nextState, depth + 1, 0, alpha, beta)
        #             if v2 < v:
        #                 v, move = v2, action
        #                 beta = min(beta, v)
        #             if v <= alpha:
        #                 return v, move
        #         return v, move
        #     else:
        #         v = 99999
        #         for nextState in nextStates:
        #             v2, action = alphabeta(nextState, depth, agentIndex + 1, alpha, beta)
        #             if v2 < v:
        #                 v, move = v2, action
        #                 beta = min(beta, v)
        #             if v <= alpha:
        #                 return v, move
        #         return v, move
        #
        # alphabetaVal = alphabeta(state, 0, 0, -99999, 99999)
        # # print(alphabetaVal)
        # print(alphabetaVal)
        # return alphabetaVal[1]
        # passing in alpha = -inf and beta = inf
        value, move = self.maxValue(state, 2, float('-inf'), float('inf'))
        # print("move: ", move + " value: ", value)
        return move
Example #26
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function.

    DESCRIPTION: Combines the current score with the reciprocal distance to the
    nearest food, a reward for nearby scared ghosts, and a penalty for nearby
    non-scared ghosts.
    """
    newPosition = currentGameState.getPacmanPosition()
    oldFood = currentGameState.getFood()
    newGhostStates = currentGameState.getGhostStates()
    newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

    # *** Your Code Here ***
    x = oldFood.getWidth()
    y = oldFood.getHeight()
    maxlen = distance.manhattan((x, y), (0, 0))
    halflen = maxlen
    minfooddist = maxlen
    scareddist = maxlen
    baddist = maxlen
    scaredflag = True
    badflag = True

    for i in range(x):
        for j in range(y):
            m = oldFood[i][j]
            if m:
                d = distance.manhattan(newPosition, (i, j))
                if d < minfooddist:
                    minfooddist = d

    # Track the distances to the nearest scared ghost and the nearest non-scared ghost.
    for i in range(len(newScaredTimes)):
        ghost = newGhostStates[i]
        d = distance.manhattan(newPosition, ghost.getPosition())
        if newScaredTimes[i] > 0:
            scaredflag = False
            if d < scareddist:
                scareddist = d
        else:
            badflag = False
            if d < baddist:
                baddist = d

    if baddist == 0:
        return -100
    if scaredflag:
        scareddist = 1
    if badflag:
        baddist = 1
    elif baddist > halflen:
        baddist = 1
    ev = (1 / scareddist) + (-1 / baddist) + (currentGameState.getScore() / 2)
    if minfooddist == 0:
        ev += 10
    else:
        ev += (2 / minfooddist)
    if scaredflag:
        ev -= 1
    if badflag:
        ev += 1
    elif baddist > halflen:
        ev += 1
    return ev
Example #27
    def getFurthestCorner(self, pacPos):
        poses = [(1, 1), (1, self.height - 2), (self.width - 2, 1),
                 (self.width - 2, self.height - 2)]

        dist, pos = max([(manhattan(p, pacPos), p) for p in poses])
        return pos
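getFurthestCorner leans on Python comparing tuples element by element, so max over (distance, position) pairs picks the largest distance and carries its position along. A short standalone illustration:

candidates = [(3, (1, 1)), (9, (1, 8)), (7, (8, 1))]
dist, pos = max(candidates)
print(dist, pos)  # 9 (1, 8)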
Example #28
    def canKill(pacmanPosition, ghostPosition):
        return manhattan(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE

    def getOtherRespawned(self, gameState):
        newTeammatePos = gameState.getAgentPosition(
            self.getTeammateIndex(gameState))
        return distance.manhattan(newTeammatePos, self.teammatePos) > 1