def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling,
    unstoppable evaluation function (question 5).

    DESCRIPTION: combines the raw game score with maze-aware costs for the
    remaining food and capsules, a repulsion term for ghosts that grows
    sharply as they close in, and a pursuit cost for scared ghosts that
    are still reachable before their timer runs out.
    """
    "*** YOUR CODE HERE ***"
    pacman = currentGameState.getPacmanPosition()
    walls = currentGameState.getWalls()

    # Maze-aware costs for collectibles (helpers defined elsewhere in this file).
    food_term = _food_cost(currentGameState.getFood().asList(), pacman, walls)
    capsule_term = _food_cost(currentGameState.getCapsules(), pacman, walls)

    # Distances to active ghosts, plus (distance, scared-timer) pairs for
    # the scared ones.
    active_dists, scared_pairs = _ghost_cost(
        currentGameState.getGhostStates(), pacman, walls)

    def repulsion(dist):
        # A ghost on top of Pacman is infinitely bad; otherwise the threat
        # falls off with the square of the distance.
        if dist == 0:
            return float('inf')
        return 9 / (dist ** 2)

    ghost_term = sum(repulsion(dist) for dist in active_dists)
    # Only count scared ghosts we can reach before the timer expires.
    scared_term = sum(dist for dist, timer in scared_pairs if dist < timer)

    return (currentGameState.getScore()
            - 2 * food_term
            - capsule_term
            - scared_term
            - ghost_term)
def enhancedPacmanFeatures(state, action):
    """
    For each state, this function is called with each legal action.
    It should return a counter with { <feature name> : <feature value>, ... }

    Features extracted from the successor state: the Manhattan distance to
    each ghost ('dist0', 'dist1', ...), their sum ('dist'), a flag for the
    Stop action ('stopped'), the remaining food count ('foodCount'), and
    the number of capsules left ('power_pellet').
    """
    features = util.Counter()
    successor = state.generateSuccessor(0, action)

    # Hoist the successor queries out of the loop: the original recomputed
    # the pacman position and the full ghost-position list on every
    # iteration (via unbound GameState.getX(succ) calls) and measured each
    # ghost distance twice.
    pac_location = successor.getPacmanPosition()
    ghost_locations = successor.getGhostPositions()

    total_dist = 0
    for n, ghost_location in enumerate(ghost_locations):
        d = util.manhattanDistance(pac_location, ghost_location)
        total_dist += d
        features['dist' + str(n)] = d

    if action == 'Stop':
        features['stopped'] += 1

    features['dist'] = total_dist
    features['foodCount'] = successor.getNumFood()
    features['power_pellet'] = len(successor.getCapsules())
    return features
def betterEvaluationFunction(currentGameState: GameState) -> float:
    """
    Your extreme, unstoppable evaluation function (problem 4).

    Note that you can't fix a seed in this function.

    Scores a state as the game score plus exponentially distance-weighted
    bonuses (2 ** -manhattanDistance) for food, capsules, and scared
    ghosts: nearby objectives dominate, distant ones contribute almost
    nothing.  While any ghost is scared the capsule bonus is a flat 100 so
    Pacman prefers hunting the scared ghosts over grabbing another capsule.
    """
    # BEGIN_YOUR_CODE (our solution is 13 lines of code, but don't worry if you deviate from this)
    # Hoisted: pacman's position is invariant for this state (the original
    # re-fetched it once per objective inside getDistFromPacman).
    pacmanPos = currentGameState.getPacmanPosition()

    def exponentiallyWeightedScore(objectives):
        # Each objective contributes 2^(-manhattan distance): worth 1 when
        # on top of Pacman, halving with every extra step away.
        return sum(
            2 ** -(abs(pacmanPos[0] - x) + abs(pacmanPos[1] - y))
            for x, y in objectives)

    # Fix: drop the unused pacmanPosition parameter the original
    # getFoodScore helper took, and use truthiness instead of '== True'.
    foodGrid = currentGameState.getFood()
    foodsPos = [(x, y)
                for y in range(foodGrid.height)
                for x in range(foodGrid.width)
                if foodGrid[x][y]]
    food = 9 * exponentiallyWeightedScore(foodsPos)

    ghostStates = currentGameState.getGhostStates()
    scary = sum(ghostState.scaredTimer for ghostState in ghostStates) > 0
    # Flat reward while ghosts are scared: don't detour for a second capsule.
    capsule = 100 if scary else 200 * exponentiallyWeightedScore(
        currentGameState.getCapsules())

    scaredGhostsPos = [ghostState.getPosition()
                       for ghostState in ghostStates
                       if ghostState.scaredTimer > 0]
    ghost = 200 * exponentiallyWeightedScore(scaredGhostsPos)

    return food + capsule + ghost + currentGameState.getScore()
def calculate_capsule_score(game_state: GameState):
    """
    Reward proximity to the nearest power capsule.

    Returns distance_coefficient / (Manhattan distance to the closest
    capsule).  When no capsules remain the minimum stays at infinity and
    the score is 0.0 (130 / inf).  Bug fix: a capsule on Pacman's own
    square (distance 0) now scores the full coefficient instead of raising
    ZeroDivisionError.
    """
    distance_coefficient = 130
    pacman_position = game_state.getPacmanPosition()
    min_distance = min(
        (util.manhattanDistance(pacman_position, capsule_position)
         for capsule_position in game_state.getCapsules()),
        default=math.inf)
    # max(..., 1) guards the distance-0 case; every other distance (and the
    # empty-capsule inf case) is unchanged.
    return distance_coefficient / max(min_distance, 1)
def __init__(self, start_game_state: GameState):
    """Cache the board layout and search bookkeeping from the start state."""
    super().__init__(start_game_state)
    self._expanded = 0  # DO NOT CHANGE; Number of search nodes expanded
    # Keep the raw state plus the pieces of it queried most often.
    self.start_game_state = start_game_state
    self.walls = start_game_state.getWalls()
    self.foods = start_game_state.getFood()
    self.capsules = tuple(start_game_state.getCapsules())
    self.startingPosition = start_game_state.getPacmanPosition()
    # Uniform step cost: every move is equally expensive.
    self.costFn = lambda _position: 1
    self.is_eating_capsule = True
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling,
    unstoppable evaluation function (question 5).

    DESCRIPTION: a weighted linear combination of eight features:
    1/food-count, 1/closest-food-distance, 1/capsule-count,
    1/closest-capsule-distance, a bonus for having cleared the last
    capsule, the total remaining scared time (encourages eating capsules),
    a flag for having eaten a scared ghost (some timers zero while others
    are not), and a signed 1/closest-ghost-distance term whose sign says
    whether the nearest ghost is a threat or prey.
    """
    # Terminal states dominate every feature.
    if currentGameState.isWin():
        return float("inf")
    if currentGameState.isLose():
        return float("-inf")

    pac_pos = currentGameState.getPacmanPosition()
    food_positions = currentGameState.getFood().asList()
    ghost_states = currentGameState.getGhostStates()
    scared_times = [ghost.scaredTimer for ghost in ghost_states]
    ghost_positions = [ghost.getPosition() for ghost in ghost_states]
    capsules = currentGameState.getCapsules()

    features = [0.0] * 8
    weights = [20000, 10, 1, 1, 45, 0.5, 3, 1]

    # Food: fewer dots and a closer nearest dot are both better.
    # (isWin() above guarantees at least one dot remains.)
    features[0] = 1 / len(food_positions)
    features[1] = 1 / min(util.manhattanDistance(f, pac_pos)
                          for f in food_positions)

    # Capsules: only meaningful while some remain.
    capsule_dists = [util.manhattanDistance(c, pac_pos) for c in capsules]
    if capsule_dists:
        features[2] = 1 / len(capsule_dists)
        features[3] = 1 / min(capsule_dists)
    # Encourage eating the very last capsule.
    features[4] = 0 if capsules else 1

    # Scared time: more is better (capsules were eaten recently).
    features[5] = sum(scared_times)
    # A mix of zero and non-zero timers means a scared ghost was eaten.
    # For states after the ghost is eaten this stays 1, but that does not
    # affect action selection since only relative values are compared.
    features[6] = 1 if (0 in scared_times and sum(scared_times) > 0) else 0

    # Nearest ghost: danger if any equally-close ghost lacks enough scared
    # time to be caught; otherwise it is safe (even attractive) to approach.
    ghost_dists = [util.manhattanDistance(g, pac_pos) for g in ghost_positions]
    closest = min(ghost_dists)
    closest_timers = [timer for dist, timer in zip(ghost_dists, scared_times)
                      if dist == closest]
    if any(timer < closest for timer in closest_timers):
        features[7] = -1.0 / closest
    else:
        features[7] = 1.0 / closest

    return sum(w * f for w, f in zip(weights, features))
def betterEvaluationFunction(currentGameState: GameState) -> float:
    """
    Your extreme, unstoppable evaluation function (problem 4).

    Note that you can't fix a seed in this function.

    Starts from the game score and then:
      - penalizes remaining food (4.5 per dot) and capsules (30 each),
      - penalizes proximity to the nearest ghost (1/distance), flipping
        that term into a large bonus (* -200) while that ghost is safely
        scared (timer > 5),
      - rewards closing in on the nearest food dot (3/distance, or
        5/distance for the very last dot).
    """
    score = currentGameState.getScore()
    numFoods = currentGameState.getNumFood()
    score -= 4.5 * numFoods
    score -= len(currentGameState.getCapsules()) * 30

    pacPos = currentGameState.getPacmanPosition()

    # Nearest ghost: a threat normally, a target while safely scared.
    # (Renamed from the original's misleading 'ghostStates' — these are
    # distances, not states.)
    ghostDistances = [
        manhattanDistance(ghost, pacPos)
        for ghost in currentGameState.getGhostPositions()
    ]
    minGhost = min(ghostDistances)
    index = ghostDistances.index(minGhost)
    minGhost = 1.0 / minGhost
    # Ghost agent indices are 1-based, hence index + 1.
    if currentGameState.getGhostState(index + 1).scaredTimer > 5:
        minGhost *= -200
    score -= minGhost

    if numFoods >= 1:
        # getFood().asList() yields the food coordinates directly,
        # replacing the original nested scan over the raw grid data with
        # its double-break bookkeeping; the resulting (x, y) pairs are the
        # same positions the scan collected.
        nearest = min(
            util.manhattanDistance(pacPos, food)
            for food in currentGameState.getFood().asList()
        )
        if numFoods > 1:
            score += 3.0 / nearest
        else:
            score += 5.0 / nearest
    return score
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling,
    unstoppable evaluation function (question 5).

    DESCRIPTION: starts from the game score, penalizes the distance to the
    farthest food dot (x1.5) and the amount of food (x4) and capsules (x4)
    still on the board, and adds a bonus that grows with the distance to
    the nearest ghost, with that distance never counted as less than 3.
    """
    "*** YOUR CODE HERE ***"
    # Decide terminal states first: nothing else matters once the game is over.
    if currentGameState.isWin():
        return float('inf')
    if currentGameState.isLose():
        return -float('inf')

    score = currentGameState.getScore()
    pacmanPos = currentGameState.getPacmanPosition()

    # Food: penalize the farthest remaining dot (isWin() above guarantees
    # at least one exists) and the total amount left, so nearby food is
    # worth eating.  max() replaces the original descending sort of the
    # whole distance list just to read its first element, and the unused
    # 'numFoods' bookkeeping (left over from a commented-out loop) is gone.
    farthestFood = max(
        util.manhattanDistance(pacmanPos, food)
        for food in currentGameState.getFood().asList())
    score -= farthestFood * 1.5
    score -= currentGameState.getNumFood() * 4

    # Ghosts: reward keeping distance from the nearest one, but saturate
    # anything closer than 3 at 3 so the term never collapses to zero.
    score += max(3, min(
        util.manhattanDistance(pacmanPos, ghost)
        for ghost in currentGameState.getGhostPositions())) * 2

    # Capsules: each one left costs a small penalty, nudging Pacman to eat them.
    score -= len(currentGameState.getCapsules()) * 4
    return score
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling,
    unstoppable evaluation function (question 5).

    DESCRIPTION: From the state we pulled pacman's position, ghost
    positions, food positions and capsule positions. We calculated the
    distance to all the food dots and the ghosts and used food dots as
    positive incentive and ghosts as negative incentive. We used number of
    capsules left as negative incentive to try to get pacman to eat
    capsules.
    """
    "*** YOUR CODE HERE ***"
    curScore = currentGameState.getScore()
    pacPos = currentGameState.getPacmanPosition()
    foodPoss = currentGameState.getFood().asList()
    capsulePoss = currentGameState.getCapsules()
    ghostPoss = currentGameState.getGhostPositions()

    # Nearest food distance; inf when no food remains so the 1/foodScore
    # term below becomes 0.  min() replaces the original hand-rolled
    # tracking list, which also stored the nearest food's position without
    # ever using it (dead store).
    foodScore = min(
        (manhattanDistance(foodPos, pacPos) for foodPos in foodPoss),
        default=float('inf'))

    ghostScore = 1.0  # offset keeps 1/ghostScore defined with no ghosts around
    nearGhosts = 0.0
    for gpos in ghostPoss:
        val = manhattanDistance(pacPos, gpos)
        if val <= 1:
            # Adjacent or overlapping ghost: extra explicit penalty.
            nearGhosts += 1 - val
        # NOTE(review): the source's indentation was lost; this accumulation
        # is reconstructed at loop level (sum of all ghost distances), which
        # matches the 1/ghostScore term's intent — confirm against history.
        ghostScore += val

    return (curScore - 1 / ghostScore + 1 / foodScore
            - nearGhosts - len(capsulePoss))