Example #1
0
 def searchTree(state: GameState, depth: int, agent: int):
     """Expectimax search over the game tree.

     Returns a [value, action] pair: the expectimax value of ``state``
     and, for Pacman (agent 0), the action achieving it; chance nodes
     and terminal/horizon states return None for the action.
     """
     legal = state.getLegalActions(agent)
     following = (agent + 1) % state.getNumAgents()
     # Decided games (or no legal moves) score exactly.
     if state.isLose() or state.isWin() or not legal:
         return [state.getScore(), None]
     # Search horizon reached: fall back to the heuristic evaluation.
     if depth == 0:
         return [self.evaluationFunction(state), None]
     if agent == 0:
         # Pacman maximizes; remember which move produced the best value.
         values = []
         for move in legal:
             child = state.generateSuccessor(agent, move)
             values.append(searchTree(child, depth, following)[0])
         best = max(values)
         return [best, legal[values.index(best)]]
     # Ghosts are chance nodes: average successor values uniformly.
     # One ply is complete once every agent has moved, so only then
     # does the remaining depth shrink.
     remaining = depth - 1 if following == 0 else depth
     values = [
         searchTree(state.generateSuccessor(agent, move), remaining,
                    following)[0] for move in legal
     ]
     return [sum(values) / len(values), None]
Example #2
0
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: starts from the raw game score, subtracts costs computed
    by _food_cost for the remaining food (weighted x2) and capsules,
    subtracts a penalty that grows sharply as active ghosts approach, and
    subtracts a term for scared ghosts that still look reachable.
    """
    "*** YOUR CODE HERE ***"
    pacman = currentGameState.getPacmanPosition()
    food_grid = currentGameState.getFood()
    wall_grid = currentGameState.getWalls()
    capsule_list = currentGameState.getCapsules()
    ghost_states = currentGameState.getGhostStates()

    base = currentGameState.getScore()
    food_term = _food_cost(food_grid.asList(), pacman, wall_grid)
    capsule_term = _food_cost(capsule_list, pacman, wall_grid)

    active_dists, scared_pairs = _ghost_cost(ghost_states, pacman, wall_grid)

    def danger(dist):
        # A ghost at distance zero is infinitely bad; otherwise the
        # penalty falls off with the square of the distance.
        return float('inf') if dist == 0 else 9 / (dist ** 2)

    ghost_penalty = sum(danger(dist) for dist in active_dists)
    # Scared ghosts count only while pair[0] < pair[1] — presumably
    # (distance, remaining scared time); confirm against _ghost_cost.
    scared_term = sum(pair[0] for pair in scared_pairs if pair[0] < pair[1])

    return base - (2 * food_term) - capsule_term - scared_term - ghost_penalty
Example #3
0
def betterEvaluationFunction(currentGameState: GameState) -> float:
    """
    Your extreme, unstoppable evaluation function (problem 4). Note that you can't fix a seed in this function.

    Combines the raw game score with exponentially distance-weighted
    bonuses: food (x9), capsules (x200, replaced by a flat 100 while any
    ghost is already scared), and scared ghosts (x200).
    """

    # BEGIN_YOUR_CODE (our solution is 13 lines of code, but don't worry if you deviate from this)
    # Pacman's position is constant for this state — fetch it once instead
    # of once per objective (the original re-queried it on every call).
    pacmanPos = currentGameState.getPacmanPosition()

    def getDistFromPacman(x, y):
        # Manhattan distance from Pacman to (x, y).
        return abs(pacmanPos[0] - x) + abs(pacmanPos[1] - y)

    def exponentiallyWeightedScore(objectives):
        # Each objective contributes 2^(-distance), so nearby objectives
        # dominate and far ones contribute almost nothing.
        return sum(2**(-1 *
                       getDistFromPacman(objectivePos[0], objectivePos[1]))
                   for objectivePos in objectives)

    def getFoodScore(foodGrid):
        # Coordinates of every remaining pellet. (The original also took an
        # unused pacmanPosition parameter — dropped.)
        foodsPos = [(x, y) for y in range(foodGrid.height)
                    for x in range(foodGrid.width) if foodGrid[x][y]]
        return exponentiallyWeightedScore(foodsPos)

    food = 9 * getFoodScore(currentGameState.getFood())
    ghostStates = currentGameState.getGhostStates()
    scary = sum(ghostState.scaredTimer for ghostState in ghostStates) > 0
    # While a capsule is already active, chasing more capsules is wasteful:
    # award a flat bonus instead of a proximity-weighted one.
    capsule = 100 if scary else 200 * exponentiallyWeightedScore(
        currentGameState.getCapsules())
    scaredGhostsPos = [
        ghostState.getPosition() for ghostState in ghostStates
        if ghostState.scaredTimer > 0
    ]
    ghost = 200 * exponentiallyWeightedScore(scaredGhostsPos)
    score = currentGameState.getScore()
    return food + capsule + ghost + score
Example #4
0
def scoreEvaluationFunction(currentGameState: GameState):
    """
      Default evaluation: the state's own score, exactly as shown in the
      Pacman GUI.

      Intended for adversarial search agents (not reflex agents), which
      evaluate states rather than actions.
    """
    score = currentGameState.getScore()
    return score
def betterEvaluationFunction(currentGameState: GameState) -> float:
    """
    Your extreme, unstoppable evaluation function (problem 4). Note that you can't fix a seed in this function.

    Score plus a heuristic: huge constants for won/lost states, a
    distance-scaled ghost term when a ghost is within 4 steps (attractive
    while scared, repulsive otherwise), else a bonus for nearby food.
    """

    # BEGIN_YOUR_CODE (our solution is 13 lines of code, but don't worry if you deviate from this)

    WIN_STATE_SCORE = 100000
    LOSE_STATE_SCORE = -WIN_STATE_SCORE
    NEAR_GHOST_MAX_SCORE = 50
    NEAR_FOOD_MAX_SCORE = 10000

    def _nearest_food_distance(pacman_pos, foods):
        # Manhattan distance to the closest pellet; inf when none remain.
        return min(
            (util.manhattanDistance(food, pacman_pos) for food in foods),
            default=float("inf"))

    def _nearest_ghost(pacman_pos, ghosts):
        # The ghost state closest to Pacman, paired with its distance.
        # Only called with a non-empty ghost list.
        best = min(
            ghosts,
            key=lambda g: util.manhattanDistance(g.getPosition(), pacman_pos))
        return best, util.manhattanDistance(best.getPosition(), pacman_pos)

    def _heuristic():
        pacman_position = currentGameState.getPacmanPosition()
        foods = currentGameState.getFood().asList()
        ghosts = currentGameState.getGhostStates()

        # Decided games dominate everything else.
        if currentGameState.isWin():
            return WIN_STATE_SCORE
        if currentGameState.isLose():
            return LOSE_STATE_SCORE

        if ghosts:
            ghost, distance = _nearest_ghost(pacman_position, ghosts)
            if 0 < distance < 4:
                # A nearby ghost dominates: attractive while scared,
                # repulsive otherwise, stronger the closer it is.
                nearby = NEAR_GHOST_MAX_SCORE / distance
                return nearby if ghost.scaredTimer > 0 else -nearby

        # Otherwise steer toward the nearest pellet.
        return NEAR_FOOD_MAX_SCORE / _nearest_food_distance(
            pacman_position, foods)

    return currentGameState.getScore() + _heuristic()
def betterEvaluationFunction(current_game_state: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: the game score plus helper-computed food and capsule
    contributions.
    """
    "*** YOUR CODE HERE ***"

    # NOTE(review): the original also called calculate_food_distance_score
    # but never added the result into the final score — the dead call is
    # removed here. If the distance term was meant to count, add it to
    # `score` below instead.
    base_score = current_game_state.getScore()
    food_score = calculate_food_score(current_game_state)
    capsule_score = calculate_capsule_score(current_game_state)

    score = base_score + food_score + capsule_score
    return score
Example #7
0
def betterEvaluationFunction(currentGameState: GameState) -> float:
    """
      Your extreme, unstoppable evaluation function (problem 4). Note that you can't fix a seed in this function.

      Heuristic: game score, minus penalties for remaining food and
      capsules, minus a reciprocal-distance penalty for the nearest ghost
      (flipped into a large bonus while that ghost is safely scared), plus
      a bonus for being near the closest pellet.
    """
    score = currentGameState.getScore()
    numFoods = currentGameState.getNumFood()
    # Each uneaten pellet costs 4.5 points.
    score -= 4.5 * numFoods
    pacPos = currentGameState.getPacmanPosition()
    numCapsules = len(currentGameState.getCapsules())
    ghostDistances = [
        manhattanDistance(ghost, pacPos)
        for ghost in currentGameState.getGhostPositions()
    ]
    # Guard: some layouts may have no ghosts; the original min() would
    # crash on an empty list.
    if ghostDistances:
        minGhost = min(ghostDistances)
        index = ghostDistances.index(minGhost)
        # Guard the reciprocal: a ghost on Pacman's square made the
        # original divide by zero.
        minGhost = 1.0 / max(minGhost, 1)
        if currentGameState.getGhostState(index + 1).scaredTimer > 5:
            # The nearest ghost stays scared long enough to hunt:
            # turn the penalty into a big attraction.
            minGhost *= -200
        score -= minGhost
    score -= numCapsules * 30
    if numFoods >= 1:
        foods = currentGameState.getFood().data
        # (i, j) indices of every True cell in the grid's raw data.
        # (The original's counting/double-break loop collected exactly
        # these same cells — there are precisely numFoods of them.)
        arr = [(i, j) for i in range(len(foods))
               for j in range(len(foods[0])) if foods[i][j]]
        nearest = min(util.manhattanDistance(pacPos, food) for food in arr)
        # Reward proximity to the closest pellet; weight the final pellet
        # more heavily to finish the game.
        score += (3.0 if numFoods > 1 else 5.0) * pow(nearest, -1.0)
    return score
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: +/- infinity for won/lost states; otherwise
    game score - 1.5 * distance to the farthest pellet
               - 4 * remaining pellet count
               + 2 * nearest-ghost distance (floored at 3)
               - 4 * remaining capsule count.
    """
    "*** YOUR CODE HERE ***"
    # Decided terminal states first: nothing else matters.
    if currentGameState.isWin():
        return float('inf')
    if currentGameState.isLose():
        return -float('inf')
    score = currentGameState.getScore()
    # Food: penalize the distance to the farthest remaining pellet so
    # Pacman is pushed to clear out the far corners. (Non-win states have
    # at least one pellet, so max() is safe. The original sorted the whole
    # list just to read its first element; max() is O(n).)
    foods = currentGameState.getFood().asList()
    foodDis = [
        util.manhattanDistance(currentGameState.getPacmanPosition(), food)
        for food in foods
    ]
    score -= max(foodDis) * 1.5
    # Nearby food should ideally get eaten: penalize the raw pellet count.
    # (A dead numFoods computation that fed only commented-out code was
    # removed here.)
    score -= (currentGameState.getNumFood() * 4)
    # Ghosts: reward distance from the nearest ghost, floored at 3 so an
    # adjacent ghost is not scored worse than one three steps away.
    # default=3 guards ghost-free layouts, where the original crashed.
    ghostDis = [
        util.manhattanDistance(currentGameState.getPacmanPosition(), ghost)
        for ghost in currentGameState.getGhostPositions()
    ]
    score += max(3, min(ghostDis, default=3)) * 2
    # Capsules: each uneaten capsule costs 4 points.
    score -= len(currentGameState.getCapsules()) * 4
    return score
Example #9
0
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: <write something here so we know what you did>
    From the state we pulled pacman's position, ghost positions, food
    positions and capsule positions. Proximity to the nearest food dot is
    a positive incentive, summed ghost distances and adjacent ghosts are
    negative incentives, and the number of capsules left is a negative
    incentive so pacman is nudged to eat them.
    """
    "*** YOUR CODE HERE ***"
    pacPos = currentGameState.getPacmanPosition()
    foodPositions = currentGameState.getFood().asList()
    capsulePositions = currentGameState.getCapsules()
    ghostPositions = currentGameState.getGhostPositions()

    # Distance to the closest pellet (inf when the board is clear, which
    # makes its reciprocal contribution vanish).
    nearestFoodDist = min(
        (manhattanDistance(food, pacPos) for food in foodPositions),
        default=float('inf'))

    # Sum ghost distances (seeded at 1 so the reciprocal is defined) and
    # count how threatening adjacent ghosts are.
    ghostScore = 1.0
    nearGhosts = 0.0
    for ghostPos in ghostPositions:
        dist = manhattanDistance(pacPos, ghostPos)
        ghostScore += dist
        if dist <= 1:
            nearGhosts += 1 - dist

    return (currentGameState.getScore() - (1 / ghostScore) +
            (1 / nearestFoodDist) - nearGhosts - len(capsulePositions))
Example #10
0
 def searchTree(state: GameState, depth: int, agent: int, a, b):
     """Minimax search with alpha-beta pruning.

     Returns a [value, action] pair for `state`: agent 0 maximizes, every
     other agent minimizes. `a`/`b` are the alpha/beta bounds inherited
     from ancestor nodes; subtrees that cannot influence the final choice
     (a >= b) are pruned.
     """
     actions = state.getLegalActions(agent)
     nextAgent = (agent + 1) % state.getNumAgents()
     # Decided games (or no legal moves) are scored exactly.
     if state.isLose() or state.isWin() or len(actions) == 0:
         return [state.getScore(), None]
     elif depth == 0:
         # Search horizon reached: estimate with the evaluation function.
         return [self.evaluationFunction(state), None]
     elif agent == 0:
         # Maximizer (Pacman): raise alpha as better values appear and
         # stop expanding once a >= b (the minimizer above would never
         # allow this branch).
         value = float('-inf')
         successors = []
         for action in actions:
             curr = searchTree(state.generateSuccessor(agent, action),
                               depth, nextAgent, a, b)[0]
             successors.append(curr)
             value = max(value, curr)
             a = max(a, value)
             if a >= b:
                 break
         maxIndex = successors.index(value)
         return [value, actions[maxIndex]]
     else:
         # Minimizer (a ghost): symmetric — lower beta and prune.
         nextDepth = depth
         # Depth counts full plies: decrement only after the last agent
         # has moved and the turn returns to Pacman.
         if nextAgent == 0:
             nextDepth -= 1
         value = float('inf')
         successors = []
         for action in actions:
             curr = searchTree(state.generateSuccessor(agent, action),
                               nextDepth, nextAgent, a, b)[0]
             successors.append(curr)
             value = min(value, curr)
             b = min(b, value)
             if a >= b:
                 break
         minIndex = successors.index(value)
         return [value, actions[minIndex]]