Пример #1
0
    def evaluationFunction(self, currentGameState, action):
        """Score the successor state reached by taking `action` from
        `currentGameState`; higher numbers are better.

        Priority order of the heuristic:
          1. flee when an active ghost is within maze distance 2;
          2. chase the nearest scared ghost (200/distance bonus);
          3. keep any move that immediately raises the score;
          4. otherwise steer toward the closest food dot.
        """
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        newScaredTimes = [g.scaredTimer for g in newGhostStates]

        # Maze distance to every ghost (ghost agent indices start at 1).
        ghostDistances = [
            mazeDistance(newPos, successorGameState.getGhostPosition(i),
                         successorGameState)
            for i in range(1, len(newGhostStates) + 1)
        ]
        # BUG FIX: min([]) raises ValueError — guard for ghost-free layouts.
        if ghostDistances and min(ghostDistances) <= 2:
            return -min(ghostDistances)

        # If any ghost is scared, hunt the closest scared one.
        if any(t > 0 for t in newScaredTimes):
            deliciousGhost = [
                mazeDistance(newPos, g.getPosition(), successorGameState)
                for g in newGhostStates if g.scaredTimer > 0
            ]
            return successorGameState.getScore() + 200 / min(deliciousGhost)

        # A move that immediately improves the score is taken as-is.
        if successorGameState.getScore() > currentGameState.getScore():
            return successorGameState.getScore()

        foodDistance = []
        for food in newFood.asList():
            dis = mazeDistance(newPos, food, successorGameState)
            foodDistance.append(dis)
            if dis <= 2:
                # Close enough — stop running expensive maze searches.
                break
        if not foodDistance:
            return successorGameState.getScore()
        return successorGameState.getScore() - min(foodDistance)
Пример #2
0
    def evaluationFunction(self, currentGameState, action):
        """Score the successor state reached by taking `action`.

        Coarse preference ordering: winning > eating a food dot >
        being close to food, with an absolute veto (0.0) on moves that
        land within maze distance 1 of a ghost.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()

        currentFoodPos = currentGameState.getFood().asList()
        from searchAgents import mazeDistance

        # mazeDistance requires integer grid coordinates.
        pacman = (int(newPos[0]), int(newPos[1]))
        newGhostDist = [
            mazeDistance(pacman, (int(gp[0]), int(gp[1])), successorGameState)
            for gp in (g.getPosition() for g in newGhostStates)
        ]
        newFoodDist = [
            mazeDistance(pacman, (int(fp[0]), int(fp[1])), successorGameState)
            for fp in newFood.asList()
        ]

        if successorGameState.isWin():
            return float('inf')
        # BUG FIX: min() on an empty list raises ValueError — guard both.
        # (The stray unterminated ''' that followed this block was removed.)
        minGhostDist = min(newGhostDist) if newGhostDist else float('inf')
        if minGhostDist <= 1:
            return 0.0   # a ghost can reach us next move — veto
        if newPos in currentFoodPos:
            return 2.0   # this move eats a dot
        minFoodDist = min(newFoodDist) if newFoodDist else 1.0
        return 1.0 / minFoodDist  # float
Пример #3
0
def genAlgSearch(problem):
    """Search for a path to the goal using a genetic algorithm.

    Builds a population of candidate action sequences, scores each by
    simulating it from the start state (path cost, maze distance to
    start and goal, game score), keeps the fittest chromosome of each
    generation and overall, then reproduces the population.  Runs for
    ga.num_generations rounds and returns the best chromosome found.

    This should replace the run() method in GAsearch.py.
    """
    print("Genetic Algorithm Search")
    ga = GA(problem)

    # Seed "best so far" with a do-nothing chromosome (cost 500).
    best_overall = Chromosome(problem, [Directions.STOP], 500)
    best_generation = 0
    for gen in range(ga.num_generations):  # loop generations
        best = Chromosome(problem, [Directions.STOP],
                          300)  # empty initial best generation chromosome
        ga.build_population(best)
        # print(ga.population)
        for pop in ga.population:
            # Simulate this chromosome's moves from the start state.
            state, path = build_path(problem, problem.getStartState(), pop)

            # update scores for fitness function
            pop.cost = problem.getCostOfActions(path)
            pop.dist_s = searchAgents.mazeDistance(state,
                                                   problem.getStartState(),
                                                   problem.gameState)
            pop.dist_g = searchAgents.mazeDistance(state, problem.goal,
                                                   problem.gameState)
            pop.score = problem.gameState.getScore()
            pop.size = len(problem.gameState.getWalls()[0])

            # print("Gene:", pop.calculate_fitness(), "\tBest:", best.calculate_fitness(), end = '\t')
            if pop.calculate_fitness() > best.calculate_fitness():
                best = pop.clone()
                # print("Clone:", best.calculate_fitness())
            # optimization: chromosomes that fell short of the goal grow
            # by one random move before the next generation
            if not problem.isGoalState(
                    state):  # did not reach goal state, add another move
                pop.chromosome.append(
                    random.choice([
                        Directions.NORTH, Directions.SOUTH, Directions.EAST,
                        Directions.WEST
                    ]))
        if best.calculate_fitness() > best_overall.calculate_fitness():
            best_overall = best
            best_generation = gen
        print("\nGeneration %d of %d, best:" % (gen + 1, ga.num_generations),
              best.chromosome)
        ga.reproduction_loop()
    # _, path = build_path(problem, problem.getStartState(), best)
    # if len(best_overall.chromosome) <= 1:
    #     print("Path result too short. Re-running...")
    #     genAlgSearch(problem)
    print("\nBest solution was found in generation %d out of %d" %
          ((best_generation + 1), ga.num_generations))
    print(best_overall.chromosome)
    return best_overall.chromosome
Пример #4
0
def getMinFoodDist(curPos, currentFood, currentGameState):
    """Return the maze distance from curPos to the closest food dot.

    Returns float('inf') when currentFood is empty — the same value the
    original accumulator started from.
    """
    distances = (searchAgents.mazeDistance(curPos, food, currentGameState)
                 for food in currentFood)
    return min(distances, default=float('inf'))
Пример #5
0
File: search.py  Project: Hulea/AI
def iterativeDeepeningAStar(problem):
    """Iterative-deepening search with an A*-style bound update.

    Repeatedly runs a cost-bounded DFS from the start state; when the
    bound is exhausted without reaching the goal, the bound grows by the
    maze distance from the last expanded node to the goal instead of by
    a fixed step.  Returns the list of actions to the goal.

    Identifiers are Romanian: nod = node, directii = directions,
    noduriVizitate = visited nodes, urmatorul = successor.
    """
    from searchAgents import mazeDistance
    nodAux = problem.getStartState()
    if problem.isGoalState(nodAux):
        return []

    s = util.Stack()
    depth = 0  # current cost bound for the depth-limited DFS

    while True:

        noduriVizitate = []  # visited list, reset for every new bound
        s.push((problem.getStartState(), [], 0))
        nodAux, directii, prevCost = s.pop()
        noduriVizitate.append(nodAux)

        # Depth-limited DFS: only push successors within the cost bound.
        while not problem.isGoalState(nodAux):
            for urmatorul, directie, cost in problem.getSuccessors(nodAux):
                if urmatorul not in noduriVizitate:
                    if prevCost + cost <= depth:
                        dirNoua = directii + [directie]
                        costNou = prevCost + cost
                        s.push((urmatorul, dirNoua, costNou))
                        noduriVizitate.append(urmatorul)
            if s.isEmpty():
                break
            else:
                nodAux, directii, prevCost = s.pop()

        if problem.isGoalState(nodAux):
            return directii
        else:
            # Grow the bound heuristically by the remaining maze distance.
            # NOTE(review): uses problem.state here while other snippets use
            # problem.gameState — confirm the attribute name is correct.
            depth = depth + mazeDistance(nodAux, problem.goal, problem.state)
Пример #6
0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: combines game score, remaining food count, BFS path
    length to the closest food, maze distance to the nearest ghost, and
    total remaining scared time.
    """
    # Number of food dots left.
    numFoods = currentGameState.getFood().count()
    # Length of a BFS path to the closest dot.
    searchAgent = searchAgents.ClosestDotSearchAgent('breadthFirstSearch')
    actions = searchAgent.findPathToClosestDot(currentGameState)
    distanceToFood = len(actions) if actions else 0
    # Maze distance to the nearest ghost.
    pacmanPos = currentGameState.getPacmanPosition()
    # BUG FIX: the original initialized minGhostDistance to 0, so the
    # `ghostDistance < minGhostDistance` test never fired, and the
    # never-taken branch assigned the undefined name `ghostDistances`
    # (NameError typo). Track the true minimum instead.
    minGhostDistance = float('inf')
    for pos in currentGameState.getGhostPositions():
        pos = (int(pos[0]), int(pos[1]))  # mazeDistance needs integer coords
        ghostDistance = searchAgents.mazeDistance(pacmanPos, pos, currentGameState)
        minGhostDistance = min(minGhostDistance, ghostDistance)
    if minGhostDistance == float('inf'):
        minGhostDistance = 0  # no ghosts: keep the original neutral value
    # Total remaining scared time across all ghosts.
    ghostStates = currentGameState.getGhostStates()
    totalScaredTime = sum(g.scaredTimer for g in ghostStates)
    return (currentGameState.getScore() - numFoods * 2
            + 1 / (distanceToFood + 1)
            - 2 / (minGhostDistance + 1)
            + totalScaredTime)
Пример #7
0
def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: four weighted components — current score, food
      (nearest + average distance and remaining count), nearest active
      ghost, and capsules (count plus remaining scared time).
    """
    # Only mazeDistance is used; the unused `bfs` and
    # `PositionSearchProblem` imports were removed.
    from searchAgents import mazeDistance

    newPos = currentGameState.getPacmanPosition()
    newGhostStates = currentGameState.getGhostStates()
    newScaredTimes = [g.scaredTimer for g in newGhostStates]

    # Nearest non-scared ghost; 1000 means "no active threat in range".
    nearestGhostDistance = 1000
    for ghostState in newGhostStates:
        if ghostState.scaredTimer == 0:
            nearestGhostDistance = min(
                nearestGhostDistance,
                mazeDistance(ghostState.configuration.pos, newPos,
                             currentGameState))

    # Hoisted: the original rebuilt newFood.asList() every loop iteration.
    foodList = currentGameState.getFood().asList()
    nearestFoodDistance = 1000
    averageDistance = 0.0
    for foodPos in foodList:
        # Exact maze distance is expensive; only pay for it when few
        # dots remain, otherwise approximate with Manhattan distance.
        if len(foodList) < 3:
            distance = mazeDistance(foodPos, newPos, currentGameState)
        else:
            distance = manhattanDistance(foodPos, newPos)
        averageDistance += distance
        nearestFoodDistance = min(nearestFoodDistance, distance)
    averageDistance = averageDistance / float(len(foodList) + 0.1)  # +0.1 avoids /0

    score_factor = currentGameState.getScore()
    food_factor = -(0.9 * nearestFoodDistance + 0.1 * averageDistance) / 2.0 - len(foodList)
    ghost_factor = -5.0 / (nearestGhostDistance + 1.0)
    capsule_factor = -50.0 * len(currentGameState.getCapsules()) + 0.1 * sum(newScaredTimes)

    return score_factor + food_factor + ghost_factor + capsule_factor
Пример #8
0
 def cumGhostsDistance(position, ghostPositions, state):
   """Sum a signed, scared-time-weighted distance term over all ghosts.

   Each ghost contributes sqrt(mazeDistance); scared ghosts contribute
   positively (scaled by sqrt of the remaining scared time), active
   ghosts negatively. Contributions are truncated to int before summing.
   """
   timers = [g.scaredTimer for g in state.getGhostStates()]
   total = 0
   for idx, timer in enumerate(timers):
     # Ghost agent indices start at 1.
     base = math.sqrt(
         mazeDistance(position,
                      util.nearestPoint(state.getGhostPosition(idx + 1)),
                      state))
     if timer > 0:
       contribution = base * math.sqrt(timer)
     else:
       contribution = -base
     total += int(contribution)
   return total
Пример #9
0
def betterEvaluationFunction(currentGameState):
  """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: hard veto (most negative score) on states with an
    active ghost within maze distance 2; bonus for scared-ghost time;
    large reward for fewer remaining dots minus distance to a dot.
  """
  pac_position = currentGameState.getPacmanPosition()
  food_list = currentGameState.getFood().asList()
  ghost_positions = currentGameState.getGhostPositions()
  scared_times = [g.scaredTimer for g in currentGameState.getGhostStates()]

  bonus = 0
  for i, gp in enumerate(ghost_positions):
    gp_int = (int(gp[0]), int(gp[1]))  # mazeDistance needs integer coords
    dist = mazeDistance(pac_position, gp_int, currentGameState)
    if scared_times[i] > 1:
      bonus += scared_times[i] * 10
      continue
    if dist < 2:
      # Adjacent active ghost: worst possible score.
      # BUG FIX: sys.maxint was removed in Python 3 — use sys.maxsize.
      return -1 * sys.maxsize - 1

  if len(food_list) == 0:
    return sys.maxsize

  # food_list is known non-empty here, so the redundant length check
  # that guarded this in the original was dropped.
  dist = mazeDistance(pac_position, food_list[0], currentGameState)

  score = (scoreEvaluationFunction(currentGameState) +
           2000 / (len(food_list) + 1) -
           dist) + bonus

  return score
Пример #10
0
def betterEvaluationFunction(currentGameState):
    """Evaluate the current state directly (question 5).

    Priority order: hunt the closest scared ghost for its 200-point
    bonus; once the board is clear, coast on the score plus a flat 10;
    otherwise nudge toward the first remaining food dot via a sigmoid
    bonus in (0, 9] so closer food always scores higher.
    """
    import math

    pos = currentGameState.getPacmanPosition()
    ghostStates = currentGameState.getGhostStates()
    baseScore = currentGameState.getScore()

    # Scared ghosts are worth chasing — reward closing in on the nearest.
    scaredGhosts = [g for g in ghostStates if g.scaredTimer > 0]
    if scaredGhosts:
        closest = min(
            mazeDistance(pos, g.getPosition(), currentGameState)
            for g in scaredGhosts)
        return baseScore + 200 / closest

    foods = currentGameState.getFood().asList()
    if len(foods) < 1:
        return baseScore + 10

    # Sigmoid of the maze distance to the first remaining dot.
    sig = 9 / (1 + math.e ** (mazeDistance(pos, foods[0], currentGameState)))
    return baseScore + sig
Пример #11
0
 def distCloseFood(position, foodGrid):
     """Return the maze distance to the closest food cell in foodGrid.

     NOTE(review): when the grid has no food this returns the tuple
     (0, position) while the normal path returns a plain number —
     callers must handle both shapes; kept as-is for compatibility.
     Relies on `successorGameState` from the enclosing scope.
     """
     gridInfo = foodGrid.packBits()
     value = None
     for i in range(gridInfo[0]):
         for j in range(gridInfo[1]):
             # Idiom fix: plain truthiness instead of `== True`.
             if foodGrid[i][j]:
                 dist = searchAgents.mazeDistance(position, (i, j),
                                                  successorGameState)
                 # Idiom fix: `is None` instead of `== None`; the two
                 # original assignments collapse into one condition.
                 if value is None or dist < value:
                     value = dist
     if value is None:
         value = (0, position)
     return value
Пример #12
0
        def distCloseFood(position, foodGrid):
            """Return the maze distance to the closest food in foodGrid,
            or the tuple (0, position) when the grid holds no food.

            Uses `successorGameState` from the enclosing scope.
            """
            dims = foodGrid.packBits()
            # Collect the distance to every food cell, then take the min.
            distances = [
                searchAgents.mazeDistance(position, (col, row),
                                          successorGameState)
                for col in range(dims[0])
                for row in range(dims[1])
                if foodGrid[col][row]
            ]
            if not distances:
                return (0, position)
            return min(distances)
Пример #13
0
    def evalState(state):
      """Score `state`: game score, minus distance to the closest food,
      minus distance to the closest ghost, plus a capsule term.

      Memoizes both expensive lookups in dicts from the enclosing scope
      (closestFoodCache keyed by position/food/capsules, evalCache keyed
      by position/ghost positions); also relies on `ghostPositions` and
      `findClosestFood` from that scope.
      """
      position = state.getPacmanPosition()
      capsules = state.getCapsules()
      foodGrid = state.getFood()
      foodState = position, foodGrid, tuple(capsules)
      closestFood = None
      if foodState in closestFoodCache:
        closestFood = closestFoodCache[foodState]
      else:
        # BFS seed: (position, depth 0) — findClosestFood does the search.
        nodes = util.Queue()
        node = position, 0
        nodes.push(node)
        closestFood = findClosestFood(nodes, foodGrid, capsules, state.getWalls())
        closestFoodCache[foodState] = closestFood
      
      ghostState = position, tuple(ghostPositions)
      closestGhost = None
      if ghostState in evalCache:
        closestGhost = evalCache[ghostState]
      else:
        # nearestPoint: ghost positions can be fractional mid-move.
        closestGhost = min(mazeDistance(position, util.nearestPoint(ghostPosition), state) for ghostPosition in ghostPositions)
        evalCache[ghostState] = closestGhost

      foodLeft = state.getNumFood()         
      score = state.getScore()
      #scared = newScaredTimes[0] > 0
      #cgd = cumGhostsDistance(position, ghostPositions, state)
      #numFoodEaten = startNumFood - foodLeft

      # a = 0
      # if score < 0:
      #   a = -1 * math.sqrt(abs(score))
      # elif score > 0:
      #   a = math.sqrt(score)
      a = score
      b = -1 * closestFood
      # NOTE(review): `c` is computed but not used in `result` below.
      c = -1 * foodLeft**2
      #d = 0.5 * (( closestGhost**(1.0/4)) + ( 10 * scared * closestGhost))
      # NOTE(review): -closestGhost REWARDS a nearer ghost being far —
      # i.e. penalizes distance from ghosts; confirm this is intended.
      d = -1 * closestGhost
      #e = cgd
      # NOTE(review): +100 per remaining capsule rewards NOT eating
      # capsules — confirm intent.
      f = 100 * len(capsules) 
      #print 'newScaredTimes', newScaredTimes
      #print 'score, closestFood, cgd', score, b, e
      result = a + b + d + f
      return result
Пример #14
0
    def evaluationFunction(self, currentGameState, action):
        """Evaluate `action` by scoring the successor state.

        Combines the successor's game score with a penalty for being
        near the closest non-scared ghost and a penalty proportional to
        the Manhattan distance to the nearest food dot.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()

        # Only mazeDistance is needed; the unused `bfs` and
        # `PositionSearchProblem` imports (and the unused scared-timer
        # list) were removed.
        from searchAgents import mazeDistance

        # 1000 acts as "no active ghost anywhere near".
        nearestGhostDistance = 1000
        for ghostState in newGhostStates:
            if ghostState.scaredTimer == 0:
                nearestGhostDistance = min(
                    nearestGhostDistance,
                    mazeDistance(ghostState.configuration.pos, newPos,
                                 successorGameState))

        # Manhattan distance is a cheap stand-in for maze distance here.
        nearestFoodDistance = 1000
        for foodPos in newFood.asList():
            nearestFoodDistance = min(nearestFoodDistance,
                                      manhattanDistance(foodPos, newPos))

        return (successorGameState.getScore()
                - 5.0 / (nearestGhostDistance + 1.0)
                - nearestFoodDistance / 2.0)
Пример #15
0
def betterEvaluationFunction(currentGameState):
    """Score a state by blending proximity incentives (question 5).

    Rewards closeness to food (weight 1), capsules (3) and edible scared
    ghosts (2), and rewards keeping nearby dangerous ghosts at distance
    (weight 4 on the closest, plus half the average). Distances are
    offset against lower_bd (board width + height) so that "closer"
    always means "more points". The result is added to the game score.
    """
    pacman = currentGameState.getPacmanPosition()
    food_grid = currentGameState.getFood()
    lower_bd = food_grid.width + food_grid.height

    dis_f = [manhattanDistance(pacman, food) for food in food_grid.asList()]
    dis_cap = [mazeDistance(pacman, cap, currentGameState)
               for cap in currentGameState.getCapsules()]

    dis_ghost_can_eat = []
    dis_ghost_escape = []
    for ghost in currentGameState.getGhostStates():
        distance_g = manhattanDistance(pacman, ghost.getPosition())
        if ghost.scaredTimer > 2:
            dis_ghost_can_eat.append(distance_g)
        elif distance_g < 4:
            dis_ghost_escape.append(distance_g)

    eva = 0
    if dis_f:
        eva += lower_bd - min(dis_f)
    if dis_cap:
        eva += 3 * (lower_bd - min(dis_cap))
    if dis_ghost_can_eat:
        eva += 2 * (lower_bd - min(dis_ghost_can_eat))
    if dis_ghost_escape:
        eva += 4 * min(dis_ghost_escape) + (sum(dis_ghost_escape) / len(dis_ghost_escape)) / 2
    return eva + currentGameState.getScore()
Пример #16
0
def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: reciprocal-distance features — reward being close to
      food, penalize the remaining food count and proximity to ghosts;
      the ghost-avoidance terms are dropped while ghosts are scared.
    """
    # tuple (x, y)
    pacmanPos = currentGameState.getPacmanPosition()
    # Grid of booleans, True where food exists.
    newFood = currentGameState.getFood()
    newGhostStates = currentGameState.getGhostStates()
    newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]

    newFoodCount = currentGameState.getNumFood()
    if newFoodCount == 0:
        newFoodCount = -1  # board cleared: turn the penalty into a reward

    foodDis = 0
    foodMinList = []
    for food in newFood.asList():
        dis = manhattanDistance(food, pacmanPos)
        foodDis += dis
        foodMinList.append(dis)
    foodMin = min(foodMinList) if foodMinList else 1
    if foodDis == 0:
        foodDis = 1  # avoid division by zero below

    ghostDis = 0
    ghostMinList = []
    for ghost in currentGameState.getGhostPositions():
        dis = manhattanDistance(ghost, pacmanPos)
        ghostDis += dis
        ghostMinList.append(dis)
    # BUG FIX: the original guard was `len(foodMinList) == 0 or not
    # ghostMinList` — a copy-paste of the FOOD check — so an empty food
    # list wrongly forced ghostMin to 1 even with ghosts on the board.
    ghostMin = min(ghostMinList) if ghostMinList else 1
    if ghostDis == 0:
        ghostDis = 1
    if ghostMin == 0:
        ghostMin = 1

    # Average capsule distance (used only by the alternative scores
    # that are kept commented out below).
    # BUG FIX: the original tested `minCapsuleDis == 0 or numCapsules == 0`
    # with minCapsuleDis still 0, so the averaging branch was unreachable.
    capsules = currentGameState.getCapsules()
    numCapsules = len(capsules)
    sumCapsuleDis = sum(
        searchAgents.mazeDistance(capsule, pacmanPos, currentGameState)
        for capsule in capsules)
    if numCapsules == 0:
        minCapsuleDis = 1
        sumCapsuleDis = 1
    else:
        minCapsuleDis = sumCapsuleDis / numCapsules

    # pacman ate a capsule and can now eat ghosts
    if sum(newScaredTimes) > 1:
        score = 1 / foodMin - newFoodCount + 1 / ghostDis
    # pacman is vulnerable to ghosts
    else:
        # alternative weightings tried during tuning:
        # score = 1/foodMin + 100/newFoodCount - 2/ghostMin - 1/ghostDis + 1/sumCapsuleDis + 100/numCapsules
        # score = 1/foodMin + 3/newFoodCount + 1/minCapsuleDis - 2/ghostMin - 1/ghostDis
        score = 1 / foodMin - newFoodCount - 2 / ghostMin - 1 / ghostDis
    return score
def betterEvaluationFunction(currentGameState):
    """
     Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: weighted sum of three features — remaining-food count
      (dominant weight 5000), maze distance to the closest food, and
      maze distance to the closest ghost. The ghost term is neutralized
      with constants when the ghost is far away (> 2) or scared.
    """
    import searchAgents

    def convert_to_integer(position):
        # mazeDistance requires integer grid coordinates; ghost
        # positions can be fractional mid-move.
        x, y = position
        return (int(x), int(y))

    Pacman_Position = currentGameState.getPacmanPosition()
    Food = currentGameState.getFood().asList()
    Ghost_Position = currentGameState.getGhostPositions()
    GhostStates = currentGameState.getGhostStates()
    ScaredTimes = [ghostState.scaredTimer for ghostState in GhostStates]

    # Feature weights.
    Weight_Pac2Food = 1.0
    Weight_NumFood = 5000.0

    # Maze distance to the closest ghost.
    # BUG FIX: the original did `Pac2Ghost = min(distance, Pac2Ghost)`
    # without ever updating `distance` (stuck at inf), so only the LAST
    # ghost's distance survived the loop; it also left Pac2Ghost unbound
    # when there were no ghosts.
    Pac2Ghost = float("inf")
    for GP in Ghost_Position:
        Pac2Ghost = min(
            Pac2Ghost,
            searchAgents.mazeDistance(Pacman_Position,
                                      convert_to_integer(GP),
                                      currentGameState))
    if Pac2Ghost > 2:
        # Ghost is far away (or absent): neutralize the term with
        # constants so it contributes a fixed large value.
        Weight_Pac2Ghost = 1000
        Pac2Ghost = 10
    else:  # <= 2: close enough to matter — keep the real distance.
        Weight_Pac2Ghost = 1

    # Maze distance to the closest food (0.0 once the board is clear,
    # kept small so the last dot still gets eaten).
    if len(Food) > 0:
        Pac2Food = min(
            searchAgents.mazeDistance(Pacman_Position, Fd, currentGameState)
            for Fd in Food)
    else:
        Pac2Food = 0.0

    # After a power pellet the ghost distance is ignored.
    # BUG FIX: guarded — ScaredTimes is empty on ghost-free layouts.
    if ScaredTimes and ScaredTimes[0] > 0:
        Pac2Ghost = 0.0

    NumFood = currentGameState.getNumFood()
    # +1 in each denominator avoids division by zero; the ghost term
    # abs(2.001 - Pac2Ghost) grows as the ghost gets closer than ~2.
    Score = (Weight_NumFood * (1.0 / (NumFood + 1))
             + Weight_Pac2Food / (Pac2Food + 1)
             + Weight_Pac2Ghost * abs(2.001 - Pac2Ghost))
    # The unreachable util.raiseNotDefined() after this return, and the
    # unused capsule/eat_food computations, were removed.
    return Score
Пример #18
0
    def evaluationFunction(self, currentGameState, action):
        """
        Score the successor state reached by taking *action* from
        *currentGameState*; higher is better.

        The score starts from a large constant (maxEval) and subtracts:
          * 100 points per food pellet still on the board,
          * the distance to the nearest pellet (manhattan with a wall
            penalty, refined to a maze distance via BFS), and
          * a ghost term that strongly repels nearby active ghosts and
            attracts nearby scared ghosts.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        ghostsPositions = successorGameState.getGhostPositions()
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
        walls = successorGameState.getWalls();
        capsules = successorGameState.getCapsules();
        maxEval = 10000

        #1! Calculate pellets points to substract from max Eval 100 importance
        foodPath = newFood.asList()
        foodPoints = len(foodPath) * 100

        #2! Calculate distance from food 1 importance 5 if there are walls
        # Manhattan distance to each pellet, +4 for every wall sitting directly
        # between Pacman and a pellet that shares a row or column with him.
        # NOTE(review): range(a, b) is empty when b <= a, so the wall penalty
        # only applies to pellets above / to the right of Pacman -- confirm
        # whether that asymmetry is intended.
        minDistance = 0
        nearestFood = None
        for f in foodPath:
            d = util.manhattanDistance(newPos,f)
            if f[0] == newPos[0]:
                for i in range(newPos[1],f[1]):
                     if walls[newPos[0]][i] == True:
                        d = d+4
            if f[1] == newPos[1]:
                for i in  range(newPos[0],f[0]):
                    if walls[i][newPos[1]] == True:
                        d = d+4
            if minDistance == 0 or d < minDistance:
                minDistance = d
                nearestFood = f

        #override the manhatan distance with Maze distance
        import searchAgents
        if len(foodPath) > 0:
            minDistance = searchAgents.mazeDistance(newPos,nearestFood,successorGameState)

        import math  # NOTE(review): imported but unused in this method
        #Calculate ghots (convert to negative) importance 9000

        gPoints = 0
        i=0
        for g in ghostsPositions:
            #Using BFS only when problems of near ghosts
            gpoint = 0
            securityDistance = 3
            multiplier = 1
            if newScaredTimes[i] != 0:
                # Scared ghost: widen the radius and flip the sign so the
                # "danger" term becomes an attraction.
                # NOTE(review): under Python 2 integer division (-1/3)
                # evaluates to -1, not -0.33 -- the attraction is 3x
                # stronger than the expression suggests; confirm intent.
                securityDistance = securityDistance * 3
                multiplier = (-1/3)
            distance = util.manhattanDistance(newPos,g)
            if distance < securityDistance:
                # Cheap manhattan filter first; run BFS (mazeDistance) only
                # for ghosts that might actually be close.
                gNew = (int(g[0]),int(g[1]))
                distance = searchAgents.mazeDistance(newPos,gNew,successorGameState)
                if distance <= securityDistance:
                    if distance == 0:
                        distance = 0.5  # avoid division by zero on top of a ghost
                    gpoint = 3000 * (securityDistance/distance)
                    gpoint = gpoint * multiplier
            gPoints = gPoints + gpoint
            i=i+1
        ret = maxEval -gPoints -foodPoints -minDistance
        print "Position:{0}, Ghost Point:{1}, FoodPoint:{2}, Distance points:{3}, Total:{4}".format(newPos,gPoints,foodPoints,minDistance,ret)
        return ret
Пример #19
0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: scores the CURRENT state starting from a large constant
    (maxEval) and subtracting:
      * 100 points per remaining food pellet,
      * the distance to the nearest pellet (manhattan with a wall penalty,
        refined to a maze distance via BFS), and
      * a ghost term that strongly repels nearby active ghosts and attracts
        nearby scared ghosts.
    """
     # Useful information you can extract from a GameState (pacman.py)
    newPos = currentGameState.getPacmanPosition()
    newFood = currentGameState.getFood()
    newGhostStates = currentGameState.getGhostStates()
    ghostsPositions = currentGameState.getGhostPositions()
    newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
    walls = currentGameState.getWalls();
    capsules = currentGameState.getCapsules();
    maxEval = 10000

    #1! Calculate pellets points to substract from max Eval 100 importance
    foodPath = newFood.asList()
    foodPoints = len(foodPath) * 100

    #2! Calculate distance from food 1 importance 5 if there are walls
    # Manhattan distance to each pellet, +4 for every wall sitting directly
    # between Pacman and a pellet that shares a row or column with him.
    # NOTE(review): range(a, b) is empty when b <= a, so the wall penalty
    # only applies to pellets above / to the right of Pacman -- confirm
    # whether that asymmetry is intended.
    minDistance = 0
    nearestFood = None
    for f in foodPath:
        d = util.manhattanDistance(newPos,f)
        if f[0] == newPos[0]:
            for i in range(newPos[1],f[1]):
                    if walls[newPos[0]][i] == True:
                        d = d+4
        if f[1] == newPos[1]:
            for i in  range(newPos[0],f[0]):
                if walls[i][newPos[1]] == True:
                    d = d+4
        if minDistance == 0 or d < minDistance:
            minDistance = d
            nearestFood = f

    #override the manhatan distance with Maze distance
    import searchAgents
    if len(foodPath) > 0:
        minDistance = searchAgents.mazeDistance(newPos,nearestFood,currentGameState)

    import math  # NOTE(review): imported but unused in this function
    #Calculate ghots (convert to negative) importance 9000

    gPoints = 0
    i=0
    for g in ghostsPositions:
        #Using BFS only when problems of near ghosts
        gpoint = 0
        securityDistance = 3
        multiplier = 1
        if newScaredTimes[i] != 0:
            # Scared ghost: widen the radius and flip the sign so the
            # "danger" term becomes an attraction.
            # NOTE(review): under Python 2 integer division (-1/3) evaluates
            # to -1, not -0.33 -- the attraction is 3x stronger than the
            # expression suggests; confirm intent.
            securityDistance = securityDistance * 3
            multiplier = (-1/3)
        distance = util.manhattanDistance(newPos,g)
        if distance < securityDistance:
            # Cheap manhattan filter first; run BFS (mazeDistance) only for
            # ghosts that might actually be close.
            gNew = (int(g[0]),int(g[1]))
            distance = searchAgents.mazeDistance(newPos,gNew,currentGameState)
            if distance <= securityDistance:
                if distance == 0:
                    distance = 0.5  # avoid division by zero on top of a ghost
                gpoint = 3000 * (securityDistance/distance)
                gpoint = gpoint * multiplier
        gPoints = gPoints + gpoint
        i=i+1
    ret = maxEval -gPoints -foodPoints -minDistance
    print "Position:{0}, Ghost Point:{1}, FoodPoint:{2}, Distance points:{3}, Total:{4}".format(newPos,gPoints,foodPoints,minDistance,ret)
    return ret
Пример #20
0
def enhancedPacmanFeatures(state, action):
    """
    Extract a feature Counter for the (state, action) pair.

    Features:
      STOP               -- 1 when the action is STOP, else 0
      win / lose         -- terminal-state indicators (lose weighted -10)
      UP/DOWN/RIGHT/LEFT -- 0 when a non-scared ghost occupies that
                            neighbouring cell, else 1
      nearest_ghost      -- manhattan distance to the closest ghost
      nearest_capsule    -- manhattan distance to the closest capsule
                            (0 when none remain)
      ("food", i)        -- weighted maze distance to the i-th closest
                            pellet (up to 5, weights 1.1 .. 1.5)
      capsule count      -- 1 / #capsules remaining (1 when none)
      score              -- current score scaled down by 2000
      asustaditos        -- 1 / #scared ghosts (1 when none)
    """
    features = util.Counter()

    features["STOP"] = int(action == Directions.STOP)

    successor = state.generateSuccessor(0, action)
    pac = successor.getPacmanPosition()
    ghost_cells = successor.getGhostPositions()
    ghost_states = successor.getGhostStates()

    features["win"] = int(successor.isWin())
    features["lose"] = -10 * int(successor.isLose())

    px, py = pac
    # Positions of ghosts that are currently dangerous (not scared).
    dangerous = [gs.getPosition() for gs in ghost_states if gs.scaredTimer == 0]

    adjacent = {
        "UP": (px, py + 1),
        "DOWN": (px, py - 1),
        "RIGHT": (px + 1, py),
        "LEFT": (px - 1, py),
    }
    for label, cell in adjacent.items():
        features[label] = 0 if cell in dangerous else 1

    capsule_cells = successor.getCapsules()
    food_grid = state.getFood()
    pellets = [(cx, cy)
               for cx, column in enumerate(food_grid)
               for cy, has_food in enumerate(column)
               if has_food]

    ghost_dists = sorted(util.manhattanDistance(pac, g) for g in ghost_cells)
    features["nearest_ghost"] = ghost_dists[0]

    capsule_dists = sorted(util.manhattanDistance(pac, c) for c in capsule_cells)
    features["nearest_capsule"] = capsule_dists[0] if capsule_dists else 0

    # Rank pellets by manhattan distance, keep the 5 closest, then refine
    # each to an exact maze distance (BFS).
    ranked = sorted((util.manhattanDistance(pac, cell), cell)
                    for cell in pellets)[:5]
    pellet_maze_dists = [searchAgents.mazeDistance(pac, cell, state)
                         for _, cell in ranked]
    for idx, weight in zip(xrange(len(pellet_maze_dists)),
                           [1.1, 1.2, 1.3, 1.4, 1.5]):
        features[("food", idx)] = weight * pellet_maze_dists[idx]

    features["capsule count"] = (1 / float(len(capsule_cells))
                                 if capsule_cells else 1)

    features["score"] = float(state.getScore()) / 2000

    scared = sum(1 for gs in successor.getGhostStates() if gs.scaredTimer > 0)
    features["asustaditos"] = 1 / float(scared) if scared else 1

    return features
Пример #21
0
 def closestCapsule(position, capsules, state):
   result = min(mazeDistance(position, capsule, state) for capsule in capsules)
   return result
Пример #22
0
def ecEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: starts from the current game score, subtracts 10 points per
    remaining food pellet, adds 20 points per maze-distance step separating
    Pacman from the nearest active ghost, values a scared-and-reachable ghost
    at 200 points minus the travel cost, and rewards proximity to food.
    Exact maze distances (one BFS per pellet) are only used once few pellets
    remain; above the cutoff the cheaper manhattan distance is used.
    """
    from searchAgents import mazeDistance
    foods = currentGameState.getFood().asList()
    ghostStates = currentGameState.getGhostStates()
    position = currentGameState.getPacmanPosition()
    foodcount = currentGameState.getNumFood()
    score = currentGameState.getScore()

    # Below this food count it is affordable to BFS every pellet.
    mazeDistanceFoodCutoff = 20

    # Evaluate ghosts: track the nearest active ghost, credit scared ghosts
    # that can be reached before their timer expires.
    nearestGhostDistance = float("inf")
    ghostEval = 0
    for ghost in ghostStates:
        gx, gy = ghost.getPosition()
        # Ghost coordinates can be floats mid-move; BFS needs integer cells.
        md = mazeDistance(position, (int(gx), int(gy)), currentGameState)
        if ghost.scaredTimer == 0:
            nearestGhostDistance = min(nearestGhostDistance, md)
        elif ghost.scaredTimer > md:
            # Reachable scared ghost: worth ~200 in-game points minus travel.
            ghostEval += 200 - md
    if nearestGhostDistance == float("inf"):
        nearestGhostDistance = 0
    ghostEval += nearestGhostDistance

    # Distance to the closest food.
    # Bug fix: in the original, the inf->1 fallback of the manhattan branch
    # sat INSIDE the loop (inconsistent with the maze branch); the duplicated
    # loops are merged and the fallback hoisted out.
    if len(foods) < mazeDistanceFoodCutoff:
        distances = [mazeDistance(position, food, currentGameState)
                     for food in foods]
    else:
        distances = [manhattanDistance(position, food) for food in foods]
    closestfood = min(distances) if distances else 1

    return score - 10*foodcount + 10*ghostEval + 20.0/closestfood
Пример #23
0
def contestEvaluationFunction(currentGameState):
    """
    Contest evaluation function.

    * Returns +maxint when no food remains (a won state).
    * Returns -maxint when an active (non-scared) ghost is within maze
      distance 2 -- a near-certain death.
    * While any ghost's scared timer is (almost) out, steers Pacman toward
      the nearest capsule.
    * Otherwise rewards having little food left and being close to food.
    """
    "*** YOUR CODE HERE ***"

    pacman_pos = currentGameState.getPacmanPosition()
    remaining_food = currentGameState.getFood().asList()
    ghost_cells = currentGameState.getGhostPositions()
    capsules = currentGameState.getCapsules()
    scared_times = [gs.scaredTimer for gs in currentGameState.getGhostStates()]

    if not remaining_food:
        return sys.maxint

    # Nearest pellet: pick by manhattan distance, then refine with BFS.
    closest_food = None
    closest_dist = sys.maxint
    for pellet in remaining_food:
        md = manhattanDistance(pellet, pacman_pos)
        if md < closest_dist:
            closest_dist = md
            closest_food = pellet
    closest_dist = mazeDistance(closest_food, pacman_pos, currentGameState)

    # Go capsule-hunting as soon as any ghost's scared timer is nearly out.
    hunt = any(t < 2 for t in scared_times)

    nearest_capsule = None
    capsule_dist = None
    if hunt:
        best = sys.maxint
        for capsule in capsules:
            md = manhattanDistance(capsule, pacman_pos)
            if md < best:
                best = md
                nearest_capsule = capsule
        if nearest_capsule is not None:
            capsule_dist = mazeDistance(pacman_pos, nearest_capsule,
                                        currentGameState)

    # Hard veto: an active ghost two steps away or closer means likely death.
    for idx in range(len(ghost_cells)):
        cell = (int(ghost_cells[idx][0]), int(ghost_cells[idx][1]))
        if manhattanDistance(cell, pacman_pos) < 3 and scared_times[idx] < 1:
            if mazeDistance(pacman_pos, cell, currentGameState) < 2:
                return -1 * sys.maxint

    if nearest_capsule is not None:
        return 10.0 / (capsule_dist + 1) + 100.0 / (len(capsules) + 1)

    return 6800.0 / float(len(remaining_food) + 1) + 10.0 / float(closest_dist)
Пример #24
0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: score = game score + a food-proximity encouragement
    + per-ghost proximity penalties + a capsule penalty.  Scared ghosts are
    treated as infinitely far away so they never repel Pacman.  The capsule
    penalty relaxes when a capsule is clearly closer than every ghost and
    the nearest food.
    """
    "*** YOUR CODE HERE ***"
    import sys, searchAgents

    pacmanPos = currentGameState.getPacmanPosition()
    foodPos = currentGameState.getFood().asList()
    foodDis = [manhattanDistance(pacmanPos, xy) for xy in foodPos]
    ghostStates = currentGameState.getGhostStates()
    scaredTimes = [gs.scaredTimer for gs in ghostStates]
    # Ghost coordinates can be floats mid-move; BFS needs integer cells.
    ghostsPos = [tuple(int(c) for c in gs.getPosition()) for gs in ghostStates]
    ghostsMazeDis = [
        searchAgents.mazeDistance(pacmanPos, gxy, currentGameState)
        for gxy in ghostsPos
    ]
    capsulesPos = currentGameState.getCapsules()
    capsulesMazeDis = [
        searchAgents.mazeDistance(pacmanPos, cxy, currentGameState)
        for cxy in capsulesPos
    ]

    encourage = 0
    ghostPenalty = [0] * len(ghostStates)
    capsulesPenalty = -50.0
    # Bug fix: give minFoodDis a default so the capsule clause below cannot
    # hit a NameError when no food remains.
    minFoodDis = sys.maxint

    # Scared ghosts are harmless: treat them as infinitely distant.
    for i in range(len(scaredTimes)):
        if scaredTimes[i] > 0:
            ghostsMazeDis[i] = sys.maxint

    # Penalty from each ghost; decays steeply (fifth power) with distance.
    # Bug fix: the original set an adjacent ghost's "penalty" to +sys.maxint,
    # which REWARDED standing next to a live ghost; it must be negative.
    for i in range(len(ghostStates)):
        if 0 < ghostsMazeDis[i] <= 1:
            ghostPenalty[i] = -sys.maxint
        elif ghostsMazeDis[i] > 1:
            ghostPenalty[i] = -25.0 / ((ghostsMazeDis[i] / 2.0) ** 5)

    # Encouragement from food proximity; on a manhattan tie, break it with
    # exact maze distances.
    if len(foodDis):
        minFoodDis = min(foodDis)
        encourage = 10.0 / minFoodDis
        if foodDis.count(minFoodDis) > 1 and minFoodDis >= 2:
            tied = [foodPos[i] for i, v in enumerate(foodDis) if v == minFoodDis]
            minFoodMazeDis = min(
                searchAgents.mazeDistance(pacmanPos, p, currentGameState)
                for p in tied)
            encourage = (5.0 / minFoodDis + 15.0 / minFoodMazeDis) / 2.0

    # Penalty for leaving capsules uneaten, relaxed when the nearest capsule
    # is clearly closer than every ghost and the nearest food.
    # Bug fix: the original indexed ghostsMazeDis[0] and [1], crashing unless
    # there were at least two ghosts; generalized to any ghost count.
    if len(capsulesMazeDis):
        minCapsuleMazeDis = min(capsulesMazeDis)
        if (all(minCapsuleMazeDis <= 2 * d for d in ghostsMazeDis)
                and minCapsuleMazeDis <= 3 * minFoodDis):
            capsulesPenalty = -minCapsuleMazeDis ** 2 - len(capsulesPos) * 40.0

    return currentGameState.getScore() + encourage + sum(
        ghostPenalty) + capsulesPenalty
Пример #25
0
def distHeuristic(pacPos, posList, gameState):
    """
    Estimate the cost of visiting every position in *posList* from *pacPos*.

    The estimate is the exact maze distance to the closest target plus the
    total weight of a minimum spanning tree (Kruskal's algorithm) over the
    targets, with manhattan distance as the edge weight.  Returns 0 when
    there are no targets left.
    """
    targets = posList
    if not targets:
        return 0

    # Complete graph over target indices, weighted by manhattan distance.
    edges = []
    for (i, a), (j, b) in itertools.combinations(enumerate(targets), 2):
        edges.append((i, j, util.manhattanDistance(a, b)))

    # Seed the total with the true maze distance to the closest target.
    closest = min(targets, key=lambda t: util.manhattanDistance(pacPos, t))
    total = searchAgents.mazeDistance(pacPos, closest, gameState)

    # Kruskal's MST via union-find over target indices.
    # See https://en.wikipedia.org/wiki/Kruskal's_algorithm
    parent = {}
    rank = {}
    for idx in range(len(targets)):
        parent[idx] = idx
        rank[idx] = 0

    # Consider edges in non-decreasing weight order; add each edge that
    # joins two previously disconnected subtrees.
    edges.sort(key=lambda e: e[2])
    for u, v, w in edges:
        if find(u, parent) != find(v, parent):
            union(u, v, rank, parent)
            total += w

    # MST weight plus the distance to the closest target.
    return total
Пример #26
0
def betterEvaluationFunction(currentGameState):
    """
     Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: <write something here so we know what you did>
      Athlon, 2/2, 2016
      feathures to evaluate gameState: 
      Eat food,  distance to ghost, pacman distance to food,distance to power pollet, eat power pollet. 
    """
    "*** YOUR CODE HERE ***"
    
    #Idea:3 factors to decide Agent's action, 1) newPos food info 2) newPos distance to food 3) newPos distance to Ghost
        # distance to food should have highest weight, live alive is most important !
         
        #score = (pacman2Ghost position) / (pacman postion to nearlist food distance).  
        #pacman2Ghost position: for several ghots distance = (min value)*5 + average value
        #while newscareTimes > 0, use constant to replace (pacman2Ghost position) , this factor is ignored for all actions
        #eat power pollet, newScaredTimes set to [40,40], -for each setp pacman move, -1.
        #Foods = successorGameState.getFood().asList()
        
    #successorGameState = currentGameState.generatePacmanSuccessor(action)
#     newPos = successorGameState.getPacmanPosition()
#     newFood = successorGameState.getFood()
#     newGhostStates = successorGameState.getGhostStates()
#     newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
    import searchAgents
    Pacman_Position =    currentGameState.getPacmanPosition()
    #print "Pacman position", Pacman_Position
    Food = currentGameState.getFood().asList()
    Ghost_Position = currentGameState.getGhostPositions()
    #print " Ghost postion", Ghost_Position
    capsules_Position = currentGameState.getCapsules()
    GhostStates = currentGameState.getGhostStates()
    ScaredTimes = [ghostState.scaredTimer for ghostState in GhostStates]
    #define weight parametes here
    Weight_Pac2Ghost = 0.0
    Weight_Pac2Food = 20.0
    Weight_Pac2Cap =  5
    Weight_eat_food = 30
    #get the distance to closet ghost, ignore the others
    #print "ghost postion,",Ghost_Position
    
    #convert float number position to integer position
    def convert_to_integer(position):
        x,y = position
        x = int(x)
        y = int(y)
        return (x,y)
    
    distance = float ("inf")
    for GP in Ghost_Position:
        #print "GP before convert", GP
        #Turn float to int number
        GP_new = convert_to_integer(GP)
        #print " GP after convert", GP_new
        Pac2Ghost = searchAgents.mazeDistance(Pacman_Position, GP_new, currentGameState)    
        Pac2Ghost = min(distance,Pac2Ghost)
        #Pac2Ghost = Pac2Ghost*Weight_Pac2Ghost
        #Pac2Ghost = min(abs(newPos[0]-GP[0]) + abs(newPos[1]-GP[1]) for GP in GhostPosition)
    #get pacman distance to power pollet
    if len(capsules_Position) > 0:
        Pac2Cap = min(searchAgents.mazeDistance(Pacman_Position, cap, currentGameState) for cap in capsules_Position)    
    else:  # factor will be multiply, set to 1.0 to ignore this factor
        Pac2Cap = 1.0
    
    #Get pacman distance to closet food    
    if len(Food) > 0:
        Pac2Food = min(searchAgents.mazeDistance(Pacman_Position, Fd, currentGameState) for Fd in Food)
        #a lot of case it will be 1, same value as no food, so add weight for food distance 
        #Pac2Food = Weight_Pac2Food/Pac2Food
    else:  # No food to eat :)
        # nothing to eat, set it to a constant value and this number need to be small or last food will not eat
        Pac2Food = 0.0   
        
    # if eat power pollet, ingore pacman distace to Ghost
    if ScaredTimes[0] > 0:
        Pac2Ghost = 0.0  # eat pollet, ignore Pac2Ghost distance, set a constant value here
          
    # if Pacman position has food, *10, else *1
    if currentGameState.data._foodEaten == Pacman_Position:
        print " current pos has food", Pacman_Position
        eat_food = 1.0
    else:
        eat_food = 0.0
    
    Score = Weight_eat_food*eat_food 
    #Score =  Weight_eat_food*eat_food + Weight_Pac2Ghost*Pac2Ghost +  Weight_Pac2Food/Pac2Food +  Weight_Pac2Food/Pac2Cap
    print "score:", Score
    return Score

    util.raiseNotDefined()
Пример #27
0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: starts from the current score and adds bonuses for having
    eaten capsules, keeping a small safety margin (capped at 4) from the
    nearest ghost, being close to the nearest food, having little food left,
    and being close to the nearest scared ghost.  Win/lose states short-
    circuit to +/- infinity.
    """
    "*** YOUR CODE HERE ***"
    curPos = currentGameState.getPacmanPosition()
    food = currentGameState.getFood()
    ghostStates = currentGameState.getGhostStates()
    scaredTimes = [ghostState.scaredTimer for ghostState in ghostStates]
    capsulePositions = currentGameState.getCapsules()

    curScore = currentGameState.getScore()

    # Reward having fewer capsules left (i.e. having eaten them).
    if capsulePositions:
        curScore += 1. / len(capsulePositions)

    if currentGameState.isWin():
        return float("inf")

    if currentGameState.isLose():
        return float('-inf')

    def _ghost_cell(ghost):
        # Bug fix: ghost.getPosition() can return float coordinates while a
        # ghost is between cells, but mazeDistance indexes the wall grid;
        # truncate to integer cells (consistent with the int() casts applied
        # to ghost positions elsewhere in this project).
        gx, gy = ghost.getPosition()
        return (int(gx), int(gy))

    scaredGhosts = [ghost for ghost in ghostStates if ghost.scaredTimer]

    closestScared = float("inf")
    if scaredGhosts:
        closestScared = min(
            mazeDistance(curPos, _ghost_cell(ghost), currentGameState)
            for ghost in scaredGhosts)
    if closestScared == 0:
        closestScared = 0.1  # avoid division by zero when on a scared ghost

    if ghostStates:
        closestGhostDistance = min(
            mazeDistance(curPos, _ghost_cell(ghost), currentGameState)
            for ghost in ghostStates)
        # Reward distance from the nearest ghost, but cap the bonus at 4 so
        # Pacman does not run away forever.
        if closestGhostDistance <= 4:
            curScore += 1. * closestGhostDistance
        else:
            curScore += 4

    foodList = food.asList()
    numFood = len(foodList)

    closestFoodDistance = 0
    if foodList:
        closestFoodDistance = min(
            mazeDistance(curPos, item, currentGameState) for item in foodList)

    if closestFoodDistance == 0:
        return float('inf')

    return (curScore
            + 1. / numFood
            + 1.0 / closestFoodDistance
            + 10. / closestScared)
Пример #28
0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: combines the current score with the maze distance to the
    closest food (weighted -1.5), closeness to the nearest ghost (weighted
    -3.0 on the reciprocal), the amount of food left (-5 each) and the
    number of ghosts (-2 each).  Maze distances are memoised in the
    module-level ``minDistance`` dict, keyed by the (point, pacman)
    coordinate quadruple in either order.
    """
    "*** YOUR CODE HERE ***"
    global minDistance

    pacman = currentGameState.data.agentStates[
        0].configuration.getPosition()
    ghost_states = currentGameState.getGhostStates()
    ghost_cells = [gs.configuration.getPosition() for gs in ghost_states]
    food_grid = currentGameState.getFood()
    scared_timers = [gs.scaredTimer for gs in ghost_states]
    current_score = currentGameState.getScore()

    def cached_maze_distance(cx, cy):
        # Look the pair up under both key orders; compute and cache on miss.
        key = (cx, cy, pacman[0], pacman[1])
        rev = (pacman[0], pacman[1], cx, cy)
        if key in minDistance or rev in minDistance:
            return minDistance.get(key, 0) or minDistance.get(rev, 0)
        minDistance[key] = mazeDistance(
            (int(cx), int(cy)), (pacman[0], pacman[1]), currentGameState)
        return minDistance[key]

    # Cached maze distance to the closest food cell.
    closest_food = math.inf
    for fx in range(len(food_grid.data)):
        for fy in range(len(food_grid.data[0])):
            if food_grid.data[fx][fy]:
                closest_food = min(closest_food, cached_maze_distance(fx, fy))

    # Cached maze distance to the closest ghost.
    closest_ghost = math.inf
    for gx, gy in ghost_cells:
        closest_ghost = min(closest_ghost,
                            cached_maze_distance(int(gx), int(gy)))

    if closest_food == math.inf:
        closest_food = 0

    return (current_score
            - 3.0 * 1.0 / (closest_ghost + 1e-6)
            - 1.5 * closest_food
            - 5 * sum(sum(column) for column in food_grid.data)
            - 2 * len(scared_timers))
Пример #29
0
def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: Weighted linear combination of hand-designed features:
      a dominant win/lose signal, a food-goal distance bound, incentives to
      eat capsules and then the resulting scared ghosts, and penalties for
      proximity to dangerous ghosts, ghost spawn points, surrounding walls,
      and being lined up with several ghosts.  A tiny random term breaks
      ties between otherwise-equal states.
    """

    # Useful information you can extract from a GameState (pacman.py)
    layout = currentGameState.data.layout  # Layout of this game.
    walls = layout.walls  # Grid of where walls are.
    pos = currentGameState.getPacmanPosition()  # Pacman's current position.
    food = currentGameState.getFood()  # Grid of where food is.
    foodNum = len(food.asList())  # Current number of food.
    capsules = currentGameState.getCapsules()  # Available capsule positions.
    origCapNum = len(layout.capsules)  # Original number of capsules.
    capNum = len(capsules)  # Current number of capsules.

    # Split ghosts between those who will be scared by the time
    # pacman gets to them, and ones we consider dangerous.
    # NOTE: tuple-parameter lambdas and subscripting map() are Python-2-only;
    # comprehensions below work on both Python 2 and 3.
    ghostStates = set(
        (ghost, manhattanDistance(pos, ghost.getPosition()))
        for ghost in currentGameState.getGhostStates())
    badGhosts = set(
        (ghost, dist) for ghost, dist in ghostStates
        if ghost.scaredTimer <= dist)
    scaredGhosts = ghostStates - badGhosts
    origGhostPositions = [agentPos for _, agentPos in layout.agentPositions[1:]]

    origGhostNum = len(ghostStates)  # Number of ghost when game started.
    scaredGhostNum = len(scaredGhosts)  # Number of scared ghosts right now.

    # LOSE FEATURE
    # Losing is really bad. Really really bad.
    # Don't even bother looking at other features.
    if currentGameState.isLose():
        return -100000

    # WIN FEATURE
    # Winning is great!
    # But some wins are better than others.
    winFeature = 1000000 * int(currentGameState.isWin())

    # GHOST HOUSE FEATURE
    # Discourage being close to where the ghosts start.
    # Boolean value reflects pacman being within 1 unit
    # from wherever a ghost starts.
    ghostHouseFeature = int(
        any(
            searchAgents.mazeDistance(pos, ghostPos, currentGameState) <= 1
            for ghostPos in origGhostPositions))

    # SCARED GHOST FEATURE
    # Encourage eating a scared ghost.
    # This feature is intimately related to the capsule feature
    # as it should be incentivised to finish eating the available
    # scared ghosts before eating another capsule.
    scaredGhostFeature = (origCapNum - capNum) * (origGhostNum + 1)
    minScaredGhostDist = float("inf")

    if scaredGhostNum > 0:
        minScaredGhostPos = tuple(
            map(int,
                min(scaredGhosts, key=lambda x: x[1])[0].getPosition()))
        minScaredGhostDist = searchAgents.mazeDistance(pos, minScaredGhostPos,
                                                       currentGameState)
        scaredGhostFeature += 1 / float(minScaredGhostDist) - scaredGhostNum

    # CAPSULE FEATURE
    # Encourage eating a capsule.
    # This feature is intimately related to the scared ghost feature
    # as it should be incentivised to eat the
    # scared ghosts before eating another capsule.
    capsuleFeature = 0
    minCapsuleDist = float("inf")

    if scaredGhostNum > 0:
        capsuleFeature += 1 + (origCapNum - capNum - 1) * (origGhostNum + 1)

    elif capNum > 0:
        minCapsulePos = min(
            ((cap, util.manhattanDistance(pos, cap)) for cap in capsules),
            key=lambda x: x[1])[0]
        minCapsuleDist = searchAgents.mazeDistance(pos, minCapsulePos,
                                                   currentGameState)
        capsuleFeature += 1 / float(minCapsuleDist) + (origCapNum - capNum) * (
            origGhostNum + 1)

    # GOAL BOUND FEATURE
    # Encourage decreasing the estimated path to finishing the food.
    # Adopted from Ben's food heuristic last project.
    goalBoundDenom = 1 + (len(capsules) + 1) * len(ghostStates)
    goalBound = float("inf")

    if len(scaredGhosts) == 0 and len(capsules) == 0:
        goalBound = distHeuristic(pos, food.asList(), currentGameState)
        goalBoundDenom += goalBound + 2 * foodNum

    goalBoundFeature = 1 / float(goalBoundDenom)

    # BAD GHOST FEATURE
    # Bad ghosts are bad, run away!
    minBadGhostFeature = 0
    meanBadGhostFeature = 0

    if len(badGhosts) > 0:
        # If a ghost is really close, take the time to compute the actual
        # maze distance so we know if pacman is safe or not.
        # BUG FIX: the original removed/added elements of badGhosts while
        # iterating over it, which raises RuntimeError on Python 3 (and is
        # undefined on Python 2).  Build a refined copy instead.
        refinedBadGhosts = set()
        for ghost, dist in badGhosts:
            if dist < 3:
                dist = searchAgents.mazeDistance(
                    pos, tuple(map(int, ghost.getPosition())),
                    currentGameState)
            refinedBadGhosts.add((ghost, dist))
        badGhosts = refinedBadGhosts

        minBadGhostDist = min(badGhosts, key=lambda x: x[1])[1]
        meanBadGhostDist = mean([dist for _, dist in badGhosts])

        if minBadGhostDist < 3:
            minBadGhostFeature = 1 / float(1 + minBadGhostDist)

        if meanBadGhostDist < 8:
            meanBadGhostFeature = 1 / float(1 + meanBadGhostDist)

    # WALL FEATURE
    # Generally safer to be near less walls because then
    # it's harder to get stuck between ghosts.
    # If there are three walls around pacman, that is really dangerous!
    wallNum = 0

    for i in [1, -1]:
        wallNum += int(walls[pos[0]][pos[1] + i])
        wallNum += int(walls[pos[0] + i][pos[1]])

    wallsFeature = 1 / float(1 + wallNum)

    # BUG FIX: the original assigned the dead variable `wallFeature` here,
    # so the 2-wall penalty was silently dropped.
    if wallNum == 2 and len(scaredGhosts) == 0:
        wallsFeature = 1

    if wallNum == 3 and len(scaredGhosts) == 0:
        wallsFeature = 30

    # LINE FEATURE
    # Generally discourage being in a line with two other ghosts
    # because this can lead to being stuck between two ghosts.
    lineFeature = 0
    horiVertiGhosts = 0

    for (ghost, dist) in badGhosts:
        horiVertiGhosts += int(ghost.getPosition()[0] == pos[0] and dist < 5)
        horiVertiGhosts += int(ghost.getPosition()[1] == pos[1] and dist < 5)
        if horiVertiGhosts > 1:
            lineFeature = 1
            break

    # WEIGHTED SUM OF FEATURES
    # Some features are more important than others.
    return (1 * winFeature + 30 * goalBoundFeature + 300 * capsuleFeature +
            200 * scaredGhostFeature - 200 * minBadGhostFeature -
            50 * meanBadGhostFeature - 40 * ghostHouseFeature -
            30 * lineFeature - 15 * wallsFeature + random.uniform(0, 0.01))
Пример #30
0
def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION:
      Hand-tuned for the smallClassic layout.  Ghost-eating is the top
      priority: to average above ~1450 points pacman must eat 3-4 ghosts
      per game.  The plan is capsule -> scared ghosts -> capsule -> scared
      ghosts -> remaining food, encoded as disjoint scoring bands so each
      later phase strictly dominates the previous one:

          case 0: +99999 on a win, -99999 on a loss, and -10000 whenever a
                  non-scared ghost is within one maze step (likely death).
          case 1 (2 capsules left):          score in (0, 1]
                  head for the nearest capsule.
          case 2 (1 capsule, scared ghosts present):
              2-1 a scared ghost is reachable within its remaining timer:
                  score in (201, 302] - chase the nearest such ghost.
              2-2 otherwise: score in (200, 201] - go eat the 2nd capsule
                  instead of wasting time chasing.
          case 3 (1 capsule, no scared ghosts): score in (400, 401]
                  go eat the 2nd capsule.
          case 4 (0 capsules, scared ghosts present):
              4-1 reachable scared ghost: score in (601, 702] - chase it.
              4-2 otherwise: score in (500, 600) - start clearing the food
                  (0 < foodNum < 100 keeps the band inside (500, 600)).
          case 5 (no capsules, no scared ghosts): score in (900, 1000]
                  eat the nearest food until the board is clear.

      Because each band's minimum exceeds the previous band's maximum,
      greedy maximisation of this value walks pacman through the plan.
    """
    "*** YOUR CODE HERE ***"
    # import mazeDistance, remember to include related files
    from searchAgents import mazeDistance

    # Pacman's position as an integer grid coordinate.
    rawPacman = currentGameState.getPacmanPosition()
    pacmanPos = (int(rawPacman[0]), int(rawPacman[1]))

    # Case 0: terminal states dominate everything else.
    if currentGameState.isWin():
        return 99999
    if currentGameState.isLose():
        return -99999

    capsules = currentGameState.getCapsules()
    ghostStates = currentGameState.getGhostStates()

    # Maze distance from pacman to every ghost, in ghost-index order.
    ghostDistances = [
        mazeDistance((int(gPos[0]), int(gPos[1])), pacmanPos,
                     currentGameState)
        for gPos in currentGameState.getGhostPositions()
    ]

    # Pair each ghost's distance with its scared timer; split dangerous
    # (timer == 0) ghosts from scared ones and count the scared ones.
    distTimerPairs = []
    dangerousDistances = []
    scaredNum = 0
    for dist, state in zip(ghostDistances, ghostStates):
        distTimerPairs.append((dist, state.scaredTimer))
        if state.scaredTimer:
            scaredNum += 1
        else:
            dangerousDistances.append(dist)

    # Case 0 continued: standing next to a non-scared ghost is near death.
    if dangerousDistances and min(dangerousDistances) <= 1:
        return -10000

    # Nearest ghost first.
    distTimerPairs.sort()

    # Maze distance from pacman to every remaining capsule.
    capsuleDistances = [
        mazeDistance((int(cap[0]), int(cap[1])), pacmanPos, currentGameState)
        for cap in capsules
    ]

    def nearestFoodScore(base):
        # Score for the food-clearing phases: reward fewer remaining food
        # pellets and closeness to the nearest one.
        foodList = currentGameState.getFood().asList()
        foodDistances = [
            mazeDistance((int(f[0]), int(f[1])), pacmanPos, currentGameState)
            for f in foodList
        ]
        return base - len(foodList) + 1.0 / min(foodDistances)

    if len(capsules) == 2:
        # Case 1: sprint for the 1st capsule; peaks at 1 one step away.
        return 200 - len(capsules) * 100 + 1.0 / min(capsuleDistances)

    if len(capsules) == 1 and scaredNum != 0:
        # Subcase 2-1: hunt the nearest scared ghost whose timer still
        # leaves enough moves to reach it.
        for dist, timer in distTimerPairs:
            if timer > 0 and timer - dist >= 0:
                return 401 - scaredNum * 100 + 1.0 / dist
        # Subcase 2-2: no ghost reachable in time -- the 2nd capsule
        # becomes the priority (band sits just below subcase 2-1).
        return 300 - len(capsules) * 100 + 1.0 / min(capsuleDistances)

    if len(capsules) == 1 and scaredNum == 0:
        # Case 3: ghosts from the 1st capsule are done; fetch the 2nd
        # capsule (band above anything case 2 can produce).
        return 500 - len(capsules) * 100 + 1.0 / min(capsuleDistances)

    if len(capsules) == 0 and scaredNum != 0:
        # Subcase 4-1: same ghost hunt as 2-1, one band higher.
        for dist, timer in distTimerPairs:
            if timer > 0 and timer - dist >= 0:
                return 801 - scaredNum * 100 + 1.0 / dist
        # Subcase 4-2: timers too short -- start clearing the board.
        return nearestFoodScore(600)

    # Case 5: nothing special left; finish the food (highest band).
    return nearestFoodScore(1000)
Пример #31
0
def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION: Linear combination of:
        * twice the current game score,
        * minus the length of the path to the closest food,
        * a bonus that grows as the remaining food count shrinks,
        * a ghost-distance term computed only for ghosts within 2 steps
          (cheap Manhattan pre-filter, then exact maze distance),
        * bonuses for nearby scared ghosts to encourage ghost hunting.
    """
    "*** YOUR CODE HERE ***"
    # Losing is bad: return the most negative value so it dominates.
    # (sys.maxint is Python-2-only; sys.maxsize works on both versions.)
    if currentGameState.isLose():
        return -sys.maxsize - 1

    pacmanPosition = currentGameState.getPacmanPosition()
    stateValue = 0
    pathToClosestFood = getPathToClosestFood(currentGameState)
    ghostDistanceValue = 0
    scaredGhostBonus = 0
    alreadyAssignedScaredGhostBonus = False
    for ghostState in currentGameState.getGhostStates():
        ghostPosFloat = ghostState.getPosition()
        ghostPos = (int(ghostPosFloat[0]), int(ghostPosFloat[1]))
        # Cheap Manhattan estimate first; the exact (expensive) maze
        # distance is computed only when a ghost is close enough to matter.
        dFromPacman = manhattanDistance(pacmanPosition, ghostPos)

        # First, calculate scare bonus. Having scared ghost while they're close to pacman is good. (encourage
        # pacman to kill ghosts) 150 is chosen to not overcompensate fo the scared state.
        if dFromPacman < 7 and isGhostScared(ghostState) and not alreadyAssignedScaredGhostBonus:
            scaredGhostBonus += 150
            alreadyAssignedScaredGhostBonus = True
        # Ghost getting really close and scared. Encourage pacman to attack
        # value 100 is chosen to prevent overcompensating for scared state, resulting in pacman to hesitate
        # actually killing the ghost.
        if dFromPacman < 3 and isGhostScared(ghostState):
            attackPathLength = mazeDistance(pacmanPosition, ghostPos, currentGameState)
            if attackPathLength < 3:
                scaredGhostBonus += 100 / (attackPathLength + 1)

        # Calculate ghostDistanceValue
        # If ghost too far, then we don't care (use manhattan distance to do a quick check, if reasonably close,
        # then it's worth it to invest in working out actual distance for more accurate assessment.)
        if dFromPacman > 2:
            continue
        # BUG FIX: the original wrote `elif mazeDistance(...):` which tests
        # TRUTHINESS of the distance, skipping the ghost whenever the maze
        # distance was nonzero (i.e. almost always).  The comment above
        # shows the intent was the same 2-step threshold as the Manhattan
        # pre-filter.
        elif mazeDistance(pacmanPosition, ghostPos, currentGameState) > 2:
            continue
        # If reasonably close,
        if isGhostScared(ghostState):
            ghostDistanceValue -= dFromPacman
        else:
            ghostDistanceValue += dFromPacman

    remainingFoodCount = currentGameState.getNumFood()

    stateValue += currentGameState.getScore() * 2
    if pathToClosestFood is not None:
        stateValue -= len(pathToClosestFood)
    stateValue += int(2000 / (remainingFoodCount + 1)) # as num of food decreases, getting remaining food becomes more important
    stateValue -= ghostDistanceValue * 2
    stateValue += scaredGhostBonus

    return stateValue