def scoreEvaluationFunction(self, currentGameState):
    """
    Evaluates a state from its current score, the Manhattan distance to
    the closest food dot and the number of food dots left. This
    evaluation function is meant for use with adversarial search agents
    (not reflex agents).
    """
    foodlist = currentGameState.getFood().asList()
    pos = currentGameState.getPacmanPosition()

    nearfooddist = 0
    if foodlist:
        nearfooddist = min(
            util.manhattanDistance(pos, xy2) for xy2 in foodlist)

    numfood = currentGameState.getNumFood()

    return -nearfooddist + currentGameState.getScore() - numfood

def eval(self, state, foodSet):
    """
    Arguments:
    ----------
    - `state`: representation of the game at time t.
    - `foodSet`: set containing the positions of the food dots.

    Evaluation based on the minimal Manhattan distance between Pacman
    and the food dots, the Manhattan distance to the ghost, the number
    of food dots eaten and the current score.

    Return:
    -------
    - The evaluation of the state.
    """
    foodMD = float("inf")
    pacmanPos = state.getPacmanPosition()
    for elmt in foodSet:
        MD = manhattanDistance(elmt, pacmanPos)
        if MD < foodMD:
            foodMD = MD

    nbFood = self.initFood - state.getNumFood()
    ghostMD = manhattanDistance(pacmanPos, state.getGhostPosition(1))

    evalFood = 1.0 / (foodMD + 1)
    evalGhost = 1.0 / (ghostMD + 1)
    evaluation = state.getScore() + 100 * evalFood + 50 * nbFood + evalGhost

    if state.isLose():
        evaluation = 0

    return (evaluation, None)

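# Hedged worked example (not from the original submission) illustrating the
# weighting used in eval() above, with made-up values: Pacman at (2, 2),
# food dots at (5, 2) and (0, 0), the ghost at (7, 2), 4 dots already eaten
# and a current score of 20. The local helper stands in for
# util.manhattanDistance purely for illustration.
def _manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

_pacman = (2, 2)
_food_md = min(_manhattan(_pacman, dot) for dot in [(5, 2), (0, 0)])  # 3
_ghost_md = _manhattan(_pacman, (7, 2))                               # 5
_score, _eaten = 20, 4
_evaluation = (_score + 100 * (1 / (_food_md + 1)) + 50 * _eaten
               + 1.0 / (_ghost_md + 1))
print(_evaluation)  # 20 + 25 + 200 + 0.166... = about 245.17
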
def _evalFct8(self, gameState):
    # Combines the Manhattan distance to the closest food dot, the
    # number of food dots left and the Manhattan distance to the ghost.
    pacPos = gameState.getPacmanPosition()
    foodList = gameState.getFood().asList()
    ghostPos = gameState.getGhostPositions()[0]

    # a) distance to the closest food dot
    distFood = 0
    if foodList:
        distFood = min(
            util.manhattanDistance(pacPos, foodPos) for foodPos in foodList)

    # b) number of food dots left
    foodLeft = len(foodList)
    if foodLeft > 0:
        foodFactor = (1 / foodLeft) * 100
    else:
        foodFactor = 100

    # c) ghost factor
    safeDist = util.manhattanDistance(pacPos, ghostPos)

    # d) win / lose factor
    loseFactor, winFactor = 0, 0
    if gameState.isLose():
        loseFactor = 150
    elif gameState.isWin():
        winFactor = 150

    return foodFactor - distFood * 8 + safeDist - loseFactor + winFactor

def _get_evidence(self, state):
    """
    Computes noisy distances between pacman and ghosts.

    Arguments:
    ----------
    - `state`: The current game state s_t where 't' is the current time
      step. See FAQ and class `pacman.GameState`.

    Return:
    -------
    - A list of Z noised distances in real numbers where Z is the number
      of ghosts.

    XXX: DO NOT MODIFY THIS FUNCTION !!!
    Doing so will result in a 0 grade.
    """
    positions = state.getGhostPositions()
    pacman_position = state.getPacmanPosition()
    noisy_distances = []

    for p in positions:
        true_distance = util.manhattanDistance(p, pacman_position)
        noisy_distances.append(
            np.random.normal(loc=true_distance,
                             scale=np.sqrt(self.sensor_variance)))

    return noisy_distances

def _record_metrics(self, belief_states, state):
    """
    Use this function to record your metrics related to true and belief
    states. Won't be part of specification grading.

    Arguments:
    ----------
    - `state`: The current game state s_t where 't' is the current time
      step. See FAQ and class `pacman.GameState`.
    - `belief_states`: A list of Z N*M numpy matrices of probabilities
      where N and M are respectively width and height of the maze layout
      and Z is the number of ghosts.

    N.B. : [0,0] is the bottom left corner of the maze
    """
    num = "16"
    mon_fichier = open("damso" + str(num) + ".txt", "a")

    # Most likely ghost position according to the first belief state.
    b = np.where(belief_states[0].max() == belief_states[0])
    B = (b[0][0], b[1][0])
    g = state.getGhostPosition(1)
    d = util.manhattanDistance(B, g)

    # Record the variance of the belief state and the distance between
    # the most likely position and the true ghost position.
    mon_fichier.write(
        str(np.var(belief_states[0]) * 100) + "\t" + str(d) + "\n")
    mon_fichier.close()

def cost_way(self, food, pacmanPos, foodSet, prevCost):
    """
    Arguments:
    ----------
    - `food`: boolean argument, True if the reached square holds food.
    - `pacmanPos`: position of Pacman.
    - `foodSet`: set containing the positions of the food dots.
    - `prevCost`: the previous cost.

    Computes the cost as the sum of the previous cost, the largest
    Manhattan distance to a food dot and the cost of the edge.

    Return:
    -------
    - The global cost of the edge.
    """
    cost = prevCost
    maxMD = 0
    for elmt in foodSet:
        MD = manhattanDistance(elmt, pacmanPos)
        if MD > maxMD:
            maxMD = MD

    if food:
        cost = cost + maxMD + 1
    else:
        cost = cost + maxMD + 10

    return cost

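# Hedged worked example (not from the original submission) for cost_way():
# with Pacman at (1, 1), food dots at (3, 4) and (0, 0) and a previous cost
# of 2, the largest Manhattan distance is |1-3| + |1-4| = 5, so the edge
# costs 2 + 5 + 1 = 8 when the reached square holds food and 2 + 5 + 10 = 17
# otherwise. The local helper stands in for manhattanDistance.
def _manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

_pacman, _food_set, _prev = (1, 1), {(3, 4), (0, 0)}, 2
_max_md = max(_manhattan(dot, _pacman) for dot in _food_set)
print(_prev + _max_md + 1)   # 8: the move eats a food dot
print(_prev + _max_md + 10)  # 17: the move reaches an empty square
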
def heuristic(self, state):
    """
    Given a Pacman game state, returns the cost associated with it.
    The cost is the Manhattan distance to the furthest dot.

    Arguments:
    ----------
    - `state`: the current game state. See FAQ and class
      `pacman.GameState`.

    Return:
    -------
    - The estimated cost associated with the state.
    """
    food = state.getFood()
    position = state.getPacmanPosition()

    max_distance = 0
    for x in range(food.width):
        for y in range(food.height):
            if food[x][y]:
                distance = manhattanDistance((x, y), position)
                if distance > max_distance:
                    max_distance = distance

    return max_distance

def min_distance_food(self, state):
    """
    Computes the minimum distance between Pacman and the nearest dot.

    Arguments:
    ----------
    - `state`: the current game state.

    Return:
    -------
    - The minimum distance between Pacman and the nearest dot.
    """
    distance_min = INFINITY
    pacman_pos = state.getPacmanPosition()

    for food_pos in state.getFood().asList():
        distance = manhattanDistance(pacman_pos, food_pos)
        if distance < distance_min:
            distance_min = distance

    return distance_min

def _evalFct6(self, gameState):
    # depth = 3
    # small_adv: wins optimally against dumby, greedy and smarty
    # medium_adv:
    #   - dumby: infinite loop
    #   - greedy: wins but not optimally, or suicides near the end;
    #     optimal once
    #   - smarty: suicides near the end, but has already won optimally once
    # large_adv:
    #   - dumby: wins but not optimally
    #   - greedy: starts well (except the first run) then suicides,
    #     or suicides through lack of foresight
    #   - smarty: starts well (except the first run) then suicides,
    #     or suicides through lack of foresight
    pacPos = gameState.getPacmanPosition()
    foodList = gameState.getFood().asList()
    score = gameState.getScore()

    if len(foodList) == 0:
        return score

    dist = float("+inf")
    for foodPos in foodList:
        dist = min(dist, util.manhattanDistance(pacPos, foodPos))

    return score - dist

def distClosestFood(pos, foodList):
    if len(foodList) == 0:
        return 1

    dist = float("+inf")
    for foodPos in foodList:
        dist = min(dist, util.manhattanDistance(pos, foodPos))

    return dist

def _get_evidence(self, state):
    """
    Computes noisy distances between pacman and ghosts.

    Arguments:
    ----------
    - `state`: The current game state s_t where 't' is the current time
      step. See FAQ and class `pacman.GameState`.

    Return:
    -------
    - A list of Z noised distances in real numbers where Z is the number
      of ghosts.

    XXX: DO NOT MODIFY THIS FUNCTION !!!
    Doing so will result in a 0 grade.
    """
    positions = state.getGhostPositions()
    pacman_position = state.getPacmanPosition()
    noisy_distances = []

    for pos in positions:
        true_distance = util.manhattanDistance(pos, pacman_position)
        noise = binom.rvs(self.n, self.p) - self.n * self.p
        noisy_distances.append(true_distance + noise)

    return noisy_distances

def _record_metrics(self, belief_states, state):
    """
    Use this function to record your metrics related to true and belief
    states. Won't be part of specification grading.

    Arguments:
    ----------
    - `state`: The current game state s_t where 't' is the current time
      step. See FAQ and class `pacman.GameState`.
    - `belief_states`: A list of Z N*M numpy matrices of probabilities
      where N and M are respectively width and height of the maze layout
      and Z is the number of ghosts.

    N.B. : [0,0] is the bottom left corner of the maze
    """
    if not isinstance(self.metrics, np.ndarray):
        self.metrics = np.zeros((len(belief_states), 10000))

    true_ghost_position = state.getGhostPosition(1)
    for k in range(len(belief_states)):
        belief_state = belief_states[k]
        # Distance between the true ghost position and the most likely
        # position according to the belief state.
        self.metrics[k, self.counter % 10000] = util.manhattanDistance(
            true_ghost_position,
            np.unravel_index(belief_state.argmax(), belief_state.shape))

    if self.counter > 10000:
        print(np.mean(self.metrics[0]))

def eval(state):
    """
    Returns a score that defines the final numeric value for a game that
    ends for Min or Max.

    Arguments:
    ----------
    - `state`: the current game state.

    Return:
    -------
    - A floating-point value that defines the final numeric value for a
      game that ends for Min or Max.
    """
    if manhattanDistance(state.getPacmanPosition(),
                         state.getGhostPosition(1)) < 2:
        return float("-inf")

    return -0.5 * state.getNumFood() + 0.5 * manhattanDistance(
        state.getPacmanPosition(), state.getGhostPosition(1))

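# Hedged worked example (not from the original submission) of eval() above:
# with 10 food dots left and the ghost at Manhattan distance 4, the value is
# -0.5 * 10 + 0.5 * 4 = -3.0; at a distance below 2 the state scores -inf.
_num_food, _ghost_dist = 10, 4
print(float("-inf") if _ghost_dist < 2
      else -0.5 * _num_food + 0.5 * _ghost_dist)  # -3.0
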
def score_evaluation_function(self, current_game_state):
    food_list = current_game_state.getFood().asList()
    pacman_pos = current_game_state.getPacmanPosition()
    food_nbr = len(food_list)

    nearfooddist = 0
    if food_list:
        nearfooddist = min(
            util.manhattanDistance(pacman_pos, xy2) for xy2 in food_list)

    ghost_pos = current_game_state.getGhostPosition(self.ghostIndex)
    ghost_dist = util.manhattanDistance(pacman_pos, ghost_pos)
    if ghost_dist == 0:
        ghost_dist = self.infinity

    return (-food_nbr * 10 - nearfooddist * 2
            + current_game_state.getScore() - ghost_dist)

def scoreEvaluationFunction(self, currentGameState):
    foodlist = currentGameState.getFood().asList()
    pos = currentGameState.getPacmanPosition()
    numfood = currentGameState.getNumFood()

    nearfooddist = 0
    if foodlist:
        nearfooddist = min(
            util.manhattanDistance(pos, xy2) for xy2 in foodlist)

    return currentGameState.getScore() - nearfooddist * 1.5 - numfood * 10

def scoreEvaluationFunction(self, current_game_state):
    foodlist = current_game_state.getFood().asList()
    pos = current_game_state.getPacmanPosition()
    numfood = current_game_state.getNumFood()

    nearfooddist = 0
    if foodlist:
        nearfooddist = min(
            util.manhattanDistance(pos, xy2) for xy2 in foodlist)

    posghost = current_game_state.getGhostPosition(1)
    distghost = util.manhattanDistance(pos, posghost)
    if distghost == 0:
        # Being on the ghost's square is heavily penalised.
        distghost = 1e80

    return (-numfood * 10 - nearfooddist * 2
            + current_game_state.getScore() - distghost)

def get_distance(self, successor):
    successorPos = successor.getPacmanPosition()
    successorFood = successor.getFood().asList()

    dist = []
    for i in successorFood:
        dist.append(manhattanDistance(successorPos, i))
        # Alternatives tried:
        # dist.append(distance.correlation(successorPos, i))
        # dist.append(distance.cosine(successorPos, i))

    # return min(dist or [0])
    return mean(dist or [0])

def _evalFct5(self, gameState):
    pacPos = gameState.getPacmanPosition()
    foodList = gameState.getFood().asList()
    score = gameState.getScore()

    if len(foodList) == 0:
        return 1

    dist = float("+inf")
    for foodPos in foodList:
        dist = min(dist, util.manhattanDistance(pacPos, foodPos))

    return score - dist

def __eval_state(self, state):
    """Returns a custom utility value of the state.

    Arguments:
    ----------
    - `state`: the current game state.

    Returns:
    ----------
    The custom utility value at a given state.
    """
    current_score = state.getScore()
    pacman_pos = state.getPacmanPosition()
    ghost_list = state.getGhostPositions()
    food_list = state.getFood().asList()

    # If the game is over (win or lose)
    if state.isWin() or state.isLose():
        return current_score

    # Get the distance from pacman to the closest food
    food_dist = [
        manhattanDistance(pacman_pos, food_pos) for food_pos in food_list
    ]
    closest_food = min(food_dist)

    # Get the distance from pacman to the closest ghost
    ghost_dist = [
        manhattanDistance(pacman_pos, ghost_pos) for ghost_pos in ghost_list
    ]
    closest_ghost = min(ghost_dist)

    # Get the number of food dots left
    nb_foods_left = state.getNumFood()

    # Compute the weighted score
    score = (1 * current_score - 1.5 * (1. / closest_ghost)
             - 1.5 * closest_food - 6 * nb_foods_left)

    return score

def eval(state, grid):
    """
    Returns a score that defines the final numeric value for a game that
    ends for Min or Max.

    Arguments:
    ----------
    - `state`: the current game state.
    - `grid`: the current state's boolean grid of the position of the dots.

    Return:
    -------
    - A floating-point value that defines the final numeric value for a
      game that ends for Min or Max.
    """
    ghost_dist = manhattanDistance(state.getPacmanPosition(),
                                   state.getGhostPosition(1))

    if ghost_dist < 1:
        return float("-inf")
    if ghost_dist < 3:
        return -0.75 * closestDotDistance(state, grid) + 0.25 * ghost_dist

    return -closestDotDistance(state, grid)

def scoreEvaluationFunction(self, current_game_state):
    foodlist = current_game_state.getFood().asList()
    pos = current_game_state.getPacmanPosition()
    numfood = current_game_state.getNumFood()

    nearfooddist = 0
    if foodlist:
        nearfooddist = min(
            util.manhattanDistance(pos, xy2) for xy2 in foodlist)

    posghost = current_game_state.getGhostPosition(1)
    distghost = util.manhattanDistance(pos, posghost)
    if distghost == 0:
        # Avoid a division by zero; being on the ghost is heavily penalised.
        distghost = 1e-12

    return (-numfood * 4. - nearfooddist * 1.5
            + current_game_state.getScore() - 2. / distghost)

def heuristicFunction(self, state):
    """
    Estimates the cost from the given state to the goal as the Manhattan
    distance between Pacman and the closest food dot.
    """
    foodMatrix = state.getFood()
    position = state.getPacmanPosition()

    heuristics = set()
    for x in range(foodMatrix.width):
        for y in range(foodMatrix.height):
            if foodMatrix[x][y]:
                heuristics.add(manhattanDistance(position, (x, y)))

    if not heuristics:
        return 0

    return min(heuristics)

def heuristic(self, state):
    # Greedy chain of nearest dots: repeatedly move to the closest
    # remaining dot (by Manhattan distance) and sum the distances.
    food = state.getFood()
    foods = []
    for i in range(food.width):
        for j in range(food.height):
            if food[i][j]:
                foods.append((i, j))

    node = state.getPacmanPosition()
    heuristic = 0
    while foods:
        distance, dot = min(
            (util.manhattanDistance(node, dot), dot) for dot in foods)
        heuristic += distance
        node = dot
        foods.remove(dot)

    return heuristic

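# Hedged worked example (not from the original submission) of the greedy
# chain above: from Pacman at (0, 0) with dots at (2, 0) and (2, 3), the
# closest dot is (2, 0) at distance 2, then (2, 3) at distance 3 from there,
# so the heuristic value is 2 + 3 = 5. The local helper stands in for
# util.manhattanDistance.
def _manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

_node, _dots, _h = (0, 0), [(2, 0), (2, 3)], 0
while _dots:
    _d, _dot = min((_manhattan(_node, dot), dot) for dot in _dots)
    _h += _d
    _node = _dot
    _dots.remove(_dot)
print(_h)  # 5
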
def h(currentState):
    """
    Returns the Manhattan distance between the current state and the
    furthest dot.

    Arguments:
    ----------
    - `currentState` (GameState): the current game state.

    Return:
    -------
    - The Manhattan distance between Pacman's position and the furthest dot.
    """
    posOfFood = currentState.getFood().asList()
    if len(posOfFood) == 0:
        return 0

    distances = [manhattanDistance(position, currentState.getPacmanPosition())
                 for position in posOfFood]

    return max(distances)

def h(state):
    """
    Returns the value of the admissible heuristic for the current state.

    Arguments:
    ----------
    - `state`: the current game state. See FAQ and class `pacman.GameState`.

    Return:
    -------
    - A positive value representing the admissible heuristic for the
      current state. The heuristic chosen is the mean Manhattan distance
      between the current position and the food positions.
    """
    food_list = state.getFood().asList()
    if len(food_list) == 0:
        return 0

    current_position = state.getPacmanPosition()
    result = 0
    for food_position in food_list:
        result = result + manhattanDistance(current_position, food_position)

    return result / len(food_list)

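# Hedged worked example (not from the original submission) of the mean
# Manhattan distance heuristic above: with Pacman at (1, 1) and food dots at
# (1, 4) and (3, 1), the distances are 3 and 2, so h = (3 + 2) / 2 = 2.5.
def _manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

_pos, _food = (1, 1), [(1, 4), (3, 1)]
print(sum(_manhattan(_pos, f) for f in _food) / len(_food))  # 2.5
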
def closestDotDistance(state, grid):
    """
    Finds the dot closest to Pacman in Manhattan distance and returns the
    squared Euclidean distance to it.

    Arguments:
    ----------
    - `state`: the current game state.
    - `grid`: the current state's boolean grid of the position of the dots.

    Return:
    -------
    - A floating-point value that is the closest distance.
    """
    pacman_pos = state.getPacmanPosition()
    dots = grid.asList()

    distances = [manhattanDistance(pos, pacman_pos) for pos in dots]
    i = distances.index(min(distances))  # index of the closest dot

    return ((dots[i][0] - pacman_pos[0]) ** 2
            + (dots[i][1] - pacman_pos[1]) ** 2)

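# Hedged worked example (not from the original submission) of
# closestDotDistance(): with Pacman at (0, 0) and dots at (1, 2) and (4, 0),
# the Manhattan-closest dot is (1, 2) (distance 3 versus 4), and the value
# returned is its squared Euclidean distance, 1**2 + 2**2 = 5.
_pacman, _dots = (0, 0), [(1, 2), (4, 0)]
_md = [abs(_pacman[0] - d[0]) + abs(_pacman[1] - d[1]) for d in _dots]
_closest = _dots[_md.index(min(_md))]
print((_closest[0] - _pacman[0]) ** 2 + (_closest[1] - _pacman[1]) ** 2)  # 5
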
def _evalFct6(self, gameState):
    try:
        algo = LeeAlgo(gameState)
        pacPos = gameState.getPacmanPosition()
        ghostPos = gameState.getGhostPosition(1)
        food = gameState.getFood().asList()
        score = gameState.getScore()

        # a) win factor
        if gameState.isWin():
            score += 1000

        # b) lose factor
        if gameState.isLose():
            score -= 4000

        # c) distance to food factor
        if food:
            minFoodDist = float("inf")
            for foodPos in food:
                try:
                    tmp = algo.mazeDistance(pacPos, foodPos)
                    minFoodDist = min(minFoodDist, tmp)
                except Exception as e:
                    print("Line 240: {}".format(e))
            score -= minFoodDist * 50
            algo.minMazeDistance(pacPos, food)

        # d) number of food dots left factor
        if len(food) > 0:
            score += (1 / len(food)) * 300

        # e) distance to ghost factor
        if util.manhattanDistance(pacPos, ghostPos) < 2:
            # run away
            score -= 800

        return score
    except Exception as e:
        print("Error is in the utilFct6: {}".format(e))
        return 0

def min_distance_ghost(self, state):
    """
    Computes the minimum distance between Pacman and the nearest ghost.

    Arguments:
    ----------
    - `state`: the current game state.

    Return:
    -------
    - The minimum distance between Pacman and the nearest ghost.
    """
    distance_min = INFINITY
    pacman_pos = state.getPacmanPosition()

    for ghost_pos in state.getGhostPositions():
        distance = manhattanDistance(pacman_pos, ghost_pos)
        if distance < distance_min:
            distance_min = distance

    return distance_min

def heuristic(self, state, goal):
    return util.manhattanDistance(state.getPacmanPosition(), goal)

def dist2Ghost(pos, ghostPosition):
    return util.manhattanDistance(pos, ghostPosition)