Example #1
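(All snippets below are methods on pacai capture-style agents. They appear to assume the pacai distance helpers, roughly

    from pacai.core import distance  # distance.maze(a, b, state), distance.manhattan(a, b)

plus team-specific helpers such as `getOppFood`, `getOppPacman`, `getOppGhost`, `getOppCap`, `onOppositeSide`, and `self.midpoint`, defined elsewhere on each agent class and not shown here.)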
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)  # unused in this function
        oldOppPacmanPositions = self.getOppPacman(currentGameState)
        # newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

        # Never cross onto the opponent's side; this agent stays home.
        if self.onOppositeSide(newPosition, successorGameState):
            return -1000

        # No visible invaders: head for the middle of the board and patrol.
        if not oldOppPacmanPositions:
            return -1 * distance.maze(self.midpoint, newPosition,
                                      successorGameState)

        # Otherwise, close the maze distance to the first visible invader.
        return -1 * distance.maze(oldOppPacmanPositions[0], newPosition,
                                  successorGameState)
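Despite the generic docstring, this first function plays defense: it refuses to cross the midline, patrols the middle of the board when no invaders are visible, and otherwise minimizes maze distance to the first visible invader. The `defenseEvaluationFunction` below differs only in its patrol target.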

    def defenseEvaluationFunction(self, currentGameState, action):
        """
        Variant of the function above: identical except for where the agent
        patrols when no invaders are visible.
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        oldOppPacmanPositions = self.getOppPacman(currentGameState)

        if self.onOppositeSide(newPosition, successorGameState):
            return -1000

        if not oldOppPacmanPositions:
            # No visible invaders: shadow the opposing ghost with the largest
            # x-coordinate by waiting at the first open cell to the right of
            # the midline at that ghost's y-coordinate.
            oppGhostPositions = self.getOppGhost(successorGameState)
            bestX = 0
            bestY = 0
            for ghost in oppGhostPositions:
                if ghost[0] > bestX:
                    bestX = ghost[0]
                    bestY = ghost[1]

            # Walk right from the midline until the first non-wall cell.
            # (Assumes at least one opposing ghost is visible; otherwise
            # bestY stays 0 and this scan runs along the border wall.)
            mid = self.midpoint[0]
            while currentGameState.getWalls()[mid][bestY]:
                mid += 1
            position = (mid, bestY)
            return -1 * distance.maze(position, newPosition,
                                      successorGameState)

        # Otherwise, chase the first visible invader.
        return -1 * distance.maze(oldOppPacmanPositions[0], newPosition,
                                  successorGameState)
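The midline scan above is a candidate for a small helper. A minimal sketch, assuming the same wall-grid indexing used above (`getWalls()[x][y]`); the name is hypothetical:

    def firstOpenCellFromMid(self, gameState, y):
        # Hypothetical helper: walk right from the midline at row y until
        # the first non-wall cell. Assumes such a cell exists in row y.
        x = self.midpoint[0]
        walls = gameState.getWalls()
        while walls[x][y]:
            x += 1
        return (x, y)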
Example #3
    def offenseEvaluationFunction(self, currentGameState, action):
        """
        Offensive evaluation: combines the current score, food proximity, and
        ghost proximity (sign-flipped when ghosts are scared).
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        newGhostStates = self.getOppGhostState(successorGameState)
        newGhostPositions = self.getOppGhost(successorGameState)
        oldOppPacmanPositions = self.getOppPacman(currentGameState)
        score = currentGameState.getScore() * 100  # weighted current score
        if newPosition in oldOppPacmanPositions:
            # Moving onto a visible enemy Pacman: capture it.
            return 10000
        if self.onOppositeSide(newPosition, successorGameState):
            score += 100
        for food in oldFood.asList():
            dist = distance.maze(newPosition, food, currentGameState)
            if dist == 0:  # the new position is food
                score += 1000
            else:  # add reciprocal maze distance to food (closer food = more points), weighted
                score += (1 / dist) * 100
        # Calculate the ghost score.
        for i in range(len(newGhostPositions)):
            dist = distance.manhattan(newGhostPositions[i], newPosition)
            # Coefficient flips the sign when this ghost is edible (scared),
            # reset each iteration so one scared ghost does not affect the rest.
            eat_ghost = 1
            if newGhostStates[i].getScaredTimer() > 0:
                eat_ghost = -0.1
            if dist <= 1:  # adjacent to a ghost: run away (or chase it if scared)
                score -= 50 * eat_ghost
            else:  # subtract reciprocal ghost distance (closer ghost = fewer points), weighted
                score -= (1 / dist) * 50 * eat_ghost
        return score
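A sense of scale under these weights: a pellet two steps away adds (1/2) * 100 = 50 points and one ten steps away adds 10; a non-scared ghost two steps away subtracts (1/2) * 50 = 25, and an adjacent one subtracts 50. Landing on food (+1000) and capturing an invader (+10000) dominate everything else.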

    def ABEvaluationFunction(self, currentGameState):
        """
        State-only evaluation (no action argument): a linear combination of
        hand-picked features and weights.
        """

        food = self.getOppFood(currentGameState).asList()

        # Find the food pellet with the smallest manhattan distance, a cheap
        # proxy before paying for the exact maze distance below.
        minManhattanFood = None
        minFoodDist = float('inf')

        for f in food:
            curDist = distance.manhattan(f, self.getPosition(currentGameState))
            if curDist < minFoodDist:
                minFoodDist = curDist
                minManhattanFood = f

        # Assumes at least one food pellet and one visible opposing ghost;
        # otherwise distance.maze(None, ...) or min() of an empty sequence
        # would raise.
        features = {
            "closestDistToFood": distance.maze(
                minManhattanFood, self.getPosition(currentGameState),
                currentGameState),
            "closestDistToGhost": min(
                distance.manhattan(self.getPosition(currentGameState), ghost)
                for ghost in self.getOppGhost(currentGameState)),
        }

        weights = {"closestDistToFood": -1, "closestDistToGhost": 100}

        return sum(features[feat] * weights[feat] for feat in features)
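The pattern is the standard linear evaluation: score = sum over features of weight * value. As a standalone illustration of the arithmetic, with made-up feature values:

    features = {"closestDistToFood": 12, "closestDistToGhost": 3}
    weights = {"closestDistToFood": -1, "closestDistToGhost": 100}
    score = sum(features[f] * weights[f] for f in features)
    print(score)  # 12 * -1 + 3 * 100 = 288

With these particular weights the ghost term dominates, so the agent mostly values distance from ghosts and uses food proximity as a tiebreaker.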
Example #5
    def defenseEvaluationFunction(self, currentGameState, action):

        successorGameState = currentGameState.generateSuccessor(self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)  # unused in this function
        oldOppPacmanPositions = self.getOppPacman(currentGameState)
        # newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

        # Stay on our own side.
        if self.onOppositeSide(newPosition, successorGameState):
            return -1000

        # No visible invaders: patrol toward the midpoint.
        if not oldOppPacmanPositions:
            return -1 * distance.maze(self.midpoint, newPosition, successorGameState)

        # Chase the first visible invader.
        return -1 * distance.maze(oldOppPacmanPositions[0], newPosition, successorGameState)
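This is the same stay-home, patrol-the-midpoint, chase-the-first-invader pattern as Example #1's first function, without the midline-scan refinement.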
Example #6
    def ABEvaluationFunction(self, currentGameState):
        """
        State-only evaluation: like Example #3's version, extended with a
        surrounding-walls feature and a capsule-aware ghost weight.
        """

        food = self.getOppFood(currentGameState).asList()

        # Find the food pellet with the smallest manhattan distance, a cheap
        # proxy before paying for the exact maze distance below.
        minManhattanFood = None
        minFoodDist = float('inf')

        for f in food:
            curDist = distance.manhattan(f, self.getPosition(currentGameState))
            if curDist < minFoodDist:
                minFoodDist = curDist
                minManhattanFood = f

        currentPos = self.getPosition(currentGameState)
        numWalls = self.getNumWalls(currentPos, currentGameState)

        # Flip the ghost-distance weight when the opponents are scared, so
        # the agent moves toward edible ghosts instead of away from them.
        ghostDistMultiplier = 1
        if self.oppIsScared(currentGameState):
            ghostDistMultiplier = -2

        # min() over an empty sequence raises ValueError, so only compute
        # the ghost distance when opposing ghosts are visible.
        oppGhostPositions = self.getOppGhost(currentGameState)
        closestDistToGhost = 0
        if oppGhostPositions:
            closestDistToGhost = min(
                distance.manhattan(currentPos, ghost)
                for ghost in oppGhostPositions)

        # Treat states where the nearest opposing ghost is more than 10 away
        # as very bad.
        if closestDistToGhost > 10:
            return -10000

        features = {
            "closestDistToFood": distance.maze(
                minManhattanFood, currentPos, currentGameState),
            "closestDistToGhost": closestDistToGhost,
            "surroundingWalls": numWalls,
        }

        weights = {
            "closestDistToFood": -50,
            "closestDistToGhost": 100 * ghostDistMultiplier,
            "surroundingWalls": -100,
        }

        return sum(features[feat] * weights[feat] for feat in features)
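`getNumWalls` is a team helper not shown in these snippets. A plausible sketch, assuming it counts walls adjacent to the given position (the name is from the example, but the behavior here is a guess, labeled as such):

    def getNumWalls(self, position, gameState):
        # Hypothetical reconstruction: count walls in the four orthogonally
        # adjacent cells, as a rough dead-end / tunnel indicator.
        x, y = int(position[0]), int(position[1])
        walls = gameState.getWalls()
        neighbors = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return sum(1 for (nx, ny) in neighbors if walls[nx][ny])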

    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        newGhostStates = self.getOppGhostState(successorGameState)
        newGhostPositions = self.getOppGhost(successorGameState)

        score = currentGameState.getScore() * 100  # weighted current score
        if self.onOppositeSide(newPosition, successorGameState):
            score += 100
        for food in oldFood.asList():
            dist = distance.maze(newPosition, food, currentGameState)
            if dist == 0:  # the new position is food
                score += 1000
            else:  # add reciprocal maze distance to food (closer food = more points), weighted
                score += (1 / dist) * 100
        # (Commented-out experiment: penalize positions adjacent to walls
        # while on the opponent's side.)
        # walls = currentGameState.getWalls()
        # if self.onOppositeSide(newPosition, currentGameState):
        #     for wall in walls.asList():
        #         dist = distance.manhattan(newPosition, wall)
        #         if dist <= 1:
        #             score -= 100
        # Calculate the ghost score.
        for i in range(len(newGhostPositions)):
            dist = distance.manhattan(newGhostPositions[i], newPosition)
            # Coefficient flips the sign when this ghost is edible (scared),
            # reset each iteration so one scared ghost does not affect the rest.
            eat_ghost = 1
            if newGhostStates[i].getScaredTimer() > 0:
                eat_ghost = -0.1
            if dist <= 1:  # adjacent to a ghost: run away (or chase it if scared)
                score -= 50 * eat_ghost
            else:  # subtract reciprocal ghost distance (closer ghost = fewer points), weighted
                score -= (1 / dist) * 50 * eat_ghost
        # Penalize returning to the position from two moves ago to break
        # back-and-forth oscillation.
        if len(self.posHistory) > 2 and newPosition == self.posHistory[-2]:
            score -= 10000
        return score
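`posHistory` has to be maintained outside this function. A minimal sketch of that bookkeeping (the action-selection call at the end is a hypothetical placeholder; only the `posHistory` handling is the point):

    def registerInitialState(self, gameState):
        super().registerInitialState(gameState)
        self.posHistory = []  # trail of our positions, reset each game

    def chooseAction(self, gameState):
        # Record where we are before each decision so the evaluation above
        # can penalize stepping back to the position from two moves ago.
        self.posHistory.append(gameState.getAgentPosition(self.index))
        return self.selectBestAction(gameState)  # hypothetical policy call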
    def evaluationFunction(self, currentGameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
        and an action, and returns a number, where higher numbers are better.
        Make sure to understand the range of different values before you combine them
        in your evaluation function.
        """

        successorGameState = currentGameState.generateSuccessor(
            self.index, action)

        # Useful information you can extract.
        newPosition = successorGameState.getAgentPosition(self.index)
        oldFood = self.getOppFood(currentGameState)
        newGhostPositions = self.getOppGhost(successorGameState)
        caps = self.getOppCap(currentGameState)
        # newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]

        foodList = oldFood.asList()
        score = 0

        # Capsules: a large bonus for landing on or next to one, otherwise a
        # reciprocal-distance term.
        mincap = float("inf")
        for cap in caps:  # go through all the capsules
            distf = distance.maze(cap, newPosition, currentGameState)
            mincap = min(distf, mincap)
        if mincap <= 1:
            score += 2000
        else:
            score += 300 / mincap

        # Food: tiered bonuses for landing on or near a pellet, then a
        # reciprocal-distance term for everything further away.
        for food in foodList:
            dist = distance.maze(newPosition, food, currentGameState)
            if dist == 0:  # landing on food, so very desirable
                score += 1000
            elif dist == 1:  # next to food, so very good
                score += 400
            elif dist == 2:
                score += 320
            else:  # all further food considered
                score += 300 / dist
        for ghostPosition in newGhostPositions:
            ghostDist = distance.maze(newPosition, ghostPosition, currentGameState)
            if ghostDist <= 2:
                # Within two steps of a ghost: strongly avoid.
                score -= 2000
            else:
                score -= 1 / ghostDist

        return score
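Scale check for this last function: landing on or next to a capsule adds 2000, exactly matching the 2000-point penalty for coming within two steps of a ghost, so a capsule guarded by a nearby ghost is roughly break-even; landing on food adds 1000, and the remaining 300 / dist and 1 / dist terms operate one to two orders of magnitude below those spikes.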