def registerInitialState(self, state):
    "This method is called before any moves are made."
    "*** YOUR CODE HERE ***"
    walls = state.getWalls()
    top, right = walls.height - 2, walls.width - 2
    self.top, self.right = top, right
    self.corners = ((1, 1), (1, top), (right, 1), (right, top))
    corners_path = [(mazeDistance(state.getPacmanPosition(), c, state), c)
                    for c in self.corners]
    prob = PositionSearchProblem(state, start=state.getPacmanPosition(),
                                 goal=min(corners_path)[1], warn=False)
    self.moves = search.bfs(prob)

    foodGrid = state.getFood()
    start = state.getPacmanPosition()
    mcdonalds = []
    for x, row in enumerate(foodGrid):
        for y, cell in enumerate(row):
            if foodGrid[x][y]:
                distance = mazeDistance(state.getPacmanPosition(), (x, y), state)
                mcdonalds.append((distance, (x, y)))  # record (distance, position) for every dot
    if mcdonalds:
        coordinate = min(mcdonalds)[1]
        prob = PositionSearchProblem(state, start=start, goal=coordinate, warn=False)
        self.moves = search.bfs(prob)
        return
    self.moves = []
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to
    come up with an admissible heuristic; almost all admissible heuristics
    will be consistent as well.

    If using A* ever finds a solution that is worse than what uniform cost
    search finds, your heuristic is *not* consistent, and probably not
    admissible!  On the other hand, inadmissible or inconsistent heuristics
    may find optimal solutions, so be careful.

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid is a Grid
    (see game.py) of either True or False.  You can call foodGrid.asList() to
    get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the
    problem.  For example, problem.walls gives you a Grid of where the walls
    are.

    If you want to *store* information to be reused in other calls to the
    heuristic, there is a dictionary called problem.heuristicInfo that you can
    use.  For example, if you only want to count the walls once and store that
    value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access
    problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    distances_x = []
    distances_y = []
    distances_manhattan = []
    maxes = []
    global farthest_Coordinate

    for coordinate in foodGrid.asList():
        distances_x.append(abs(position[0] - coordinate[0]))
        distances_y.append(abs(position[1] - coordinate[1]))
        distances_manhattan.append(abs(position[0] - coordinate[0]) +
                                   abs(position[1] - coordinate[1]))

    # No food left: the heuristic must be 0 at a goal state.
    if len(distances_x) == 0:
        return 0

    for coordinate in foodGrid.asList():
        if max(distances_manhattan) == (abs(position[0] - coordinate[0]) +
                                        abs(position[1] - coordinate[1])):
            farthest_Coordinate = coordinate

    if len(foodGrid.asList()) < 3 or len(foodGrid.asList()) > 9:
        prob = PositionSearchProblem(problem.startingGameState, start=position,
                                     goal=farthest_Coordinate, warn=False)
        path = search.bfs(prob)
        if path is not None:
            maxes.append(len(path))

    maxes.append(max(distances_manhattan))
    maxes.append(max(distances_x) + max(distances_y))
    maxes.append(len(foodGrid.asList()))
    return max(maxes)
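# A minimal alternative sketch of what the docstring above describes: the true maze
# distance (BFS path length) to the farthest remaining food, memoized in
# problem.heuristicInfo as the docstring suggests. It assumes the same
# PositionSearchProblem and search.bfs helpers used throughout this file; treat it as
# an illustration, not the project's reference solution. The name mazeFoodHeuristic
# is invented here for the example.
def mazeFoodHeuristic(state, problem):
    position, foodGrid = state
    foodList = foodGrid.asList()
    if not foodList:
        return 0  # heuristic must be 0 at a goal state
    cache = problem.heuristicInfo.setdefault('mazeDist', {})
    best = 0
    for food in foodList:
        key = (position, food)
        if key not in cache:
            prob = PositionSearchProblem(problem.startingGameState, start=position,
                                         goal=food, warn=False, visualize=False)
            cache[key] = len(search.bfs(prob))
        best = max(best, cache[key])
    # Pacman must at least walk to the farthest remaining food, so this is admissible;
    # the max can drop by at most 1 per step, so it is also consistent.
    return best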
def maze(point1, point2):
    # Note: relies on `walls` and `problem` from the enclosing scope.
    x1, y1 = point1
    x2, y2 = point2
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(problem.startingGameState, start=point1,
                                 goal=point2, warn=False)
    return len(search.bfs(prob))
def once(self, state):
    if not util.packet_queue.empty():
        return
    player = state.me()
    self.tryPutBomb(state, player)
    safe_map = state.game_map.safeMap()
    playerPos = util.coordToPos(player.x, player.y)
    gridX, gridY = util.posToGrid(playerPos)
    if safe_map[gridX][gridY]:
        return

    def __internal_safe(pos):
        gridX, gridY = util.posToGrid(pos)
        return safe_map[gridX][gridY]

    actions = search.bfs(state.game_map, playerPos, __internal_safe)
    move = actions[0]
    if state.moveValidForMe(actions[0]):
        self.goMove(player, move)
    else:
        # If unable to go to the specified position now, go to the current cell's center first
        centerX, centerY = util.posToCoord(playerPos)
        dx, dy = (centerX - player.x, centerY - player.y)
        self.goMove(player, Direction.byDistance(dx, dy))
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    state:   The current search state (a data structure you chose in your
             search problem)
    problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound on the
    shortest path from the state to a goal of the problem; i.e. it should be
    admissible (as well as consistent).
    """
    # corners = problem.corners  # These are the corner coordinates
    # walls = problem.walls      # These are the walls of the maze, as a Grid (game.py)
    xy1 = state[0]
    distance = []
    for s in state[1]:
        xy2 = s
        xyxy = xy1[0], xy1[1], xy2[0], xy2[1]
        if xyxy in problem.heuristicInfo.keys():
            distance.append(problem.heuristicInfo[xyxy])
        else:
            prob = PositionSearchProblem(problem.startingGameState, start=xy1,
                                         goal=xy2, warn=False, visualize=False)
            d = len(search.bfs(prob))
            problem.heuristicInfo.update({xyxy: d})
            distance.append(d)
    distance.sort()
    # Return the distance to the nearest remaining corner (0 if none are left).
    return distance[0] if distance else 0
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search
    functions you have already built.  The gameState can be any game state --
    Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                 warn=False, visualize=False)
    return len(search.bfs(prob))
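# A hedged sketch, not part of the original file: if mazeDistance is called many
# times with the same endpoints (e.g. from an ApproximateSearchAgent or a heuristic,
# as the docstring above hints), a small dict cache avoids repeating the BFS. It
# assumes the same PositionSearchProblem and search.bfs helpers defined in this file,
# and that all queries use the same layout (the cache ignores the walls).
_mazeDistanceCache = {}  # illustrative name

def cachedMazeDistance(point1, point2, gameState):
    key = (point1, point2)
    if key not in _mazeDistanceCache:
        prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                     warn=False, visualize=False)
        _mazeDistanceCache[key] = len(search.bfs(prob))
    return _mazeDistanceCache[key]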
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    if len(self.answer) > 0:
        answer = self.answer[0]
        self.answer = self.answer[1:]
        return answer
    else:
        self.time = 1
    if state.getFood().count() <= 20 and self.time == 1:
        problem = FoodSearchProblem(state)
        self.answer = search.aStarSearch(problem, foodHeuristic)
        answer = self.answer[0]
        self.answer = self.answer[1:]
        return answer
    problem = AnyFoodSearchProblem(state)
    self.answer = search.bfs(problem)
    answer = self.answer[0]
    self.answer = self.answer[1:]
    return answer
def mazeDistance(self, point1, point2):
    for food in self.getFood():
        if food not in self.myWalls:
            prob = PositionSearchProblem(self, start=point1, goal=point2, warn=False)
            dist = len(search.bfs(prob))
            self.Queue.push(food, dist)
            self.min = self.min if self.min < dist else dist
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    return search.bfs(AnyFoodSearchProblem(gameState))
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    "*** YOUR CODE HERE ***"
    if not self.moves:
        currx, curry = state.getPacmanPosition()
        walls = state.getWalls()
        mcdonalds = []
        foodGrid = state.getFood()
        for i in range(currx - 2, currx + 1):
            for j in range(curry - 2, curry + 1):
                if i >= 0 and j >= 0 and i <= self.right and j <= self.top and foodGrid[i][j] and not walls[i][j]:
                    score = find_manhattan_distance(state.getPacmanPosition(), (i, j)), 0, (i, j)
                    mcdonalds.append(score)
        if not mcdonalds:
            for x, row in enumerate(foodGrid):
                for y, cell in enumerate(row):
                    if foodGrid[x][y]:
                        score = mazeDistance(state.getPacmanPosition(), (x, y), state), self.adjacentDots(state, x, y), (x, y)
                        mcdonalds.append(score)
        if mcdonalds:
            coordinate = min(mcdonalds)[2]
            prob = PositionSearchProblem(state, start=state.getPacmanPosition(),
                                         goal=coordinate, warn=False)
            self.moves.extend(search.bfs(prob))
    a = self.moves.pop(0)
    return a
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    actions = search.bfs(problem)
    return actions
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    state:   The current search state (a data structure you chose in your
             search problem)
    problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound on the
    shortest path from the state to a goal of the problem; i.e. it should be
    admissible (as well as consistent).
    """
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls  # These are the walls of the maze, as a Grid (game.py)
    "*** YOUR CODE HERE ***"
    # Maze distance (BFS) to the first unreached corner; the Manhattan/Euclidean
    # alternatives are left commented out.
    Manhattens = []
    for i, corner in enumerate(corners):
        if not state[1][i]:
            # Manhattens.append(abs(state[0][0] - corner[0]) + abs(state[0][1] - corner[1]))
            # Manhattens.append(((state[0][0] - corner[0]) ** 2 + (state[0][1] - corner[1]) ** 2) ** 0.5)
            x1, y1 = state[0]
            x2, y2 = corner
            prob = PositionSearchProblem(problem.startingGameState, start=state[0],
                                         goal=corner, warn=False, visualize=False)
            Manhattens.append(len(search.bfs(prob)))
    if len(Manhattens) == 0:
        Manhattens.append(0)
    return Manhattens[0]
def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    pacman_position = gameState.getPacmanPosition()
    food_grid = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    food_list = food_grid.asList()
    closest_food_distance = sys.maxint
    closest_food = None
    for food in food_list:
        food_distance = abs(pacman_position[0] - food[0]) + abs(pacman_position[1] - food[1])
        if food_distance < closest_food_distance:
            closest_food_distance = food_distance
            closest_food = food

    point1 = pacman_position
    point2 = closest_food
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                 warn=False, visualize=False)
    return search.bfs(prob)
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    "*** YOUR CODE HERE ***"
    if self.foodPos == state.getPacmanPosition():
        offset = 5
        foodcount = []
        while len(foodcount) == 0:
            for food in state.getFood().asList():
                if util.manhattanDistance(state.getPacmanPosition(), food) < offset:
                    foodcount.append(food)
            offset += 2
        maze = []
        for food in foodcount:
            point1, point2 = state.getPacmanPosition(), food
            x1, y1 = point1
            x2, y2 = point2
            walls = state.getWalls()
            assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
            assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
            prob = PositionSearchProblem(state, start=point1, goal=point2,
                                         warn=False, visualize=False)
            self.nextPos = util.Queue()
            searchp = search.bfs(prob)
            maze.append((len(searchp), searchp, food))
        mini = min(maze)
        self.foodPos = mini[2]
        for direction in mini[1]:
            self.nextPos.push(direction)
    return self.nextPos.pop()
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    position = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    foodList = food.asList()
    "*** YOUR CODE HERE ***"
    closestDist = 999999
    closestFood = None
    for food in foodList:
        dist = ((position[0] - food[0]) ** 2 + (position[1] - food[1]) ** 2) ** 0.5
        if dist < closestDist:
            closestDist = dist
            closestFood = food
    problem.goal = closestFood
    return search.bfs(problem)
def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    return search.bfs(problem)
def main():
    search_type = sys.argv[1]
    board = sys.argv[2]
    if search_type == 'bfs':
        return bfs(EightPuzzle(board))
    elif search_type == 'dfs':
        return dfs(EightPuzzle(board))
    return ast(EightPuzzle(board, heuristic=manhattan_distance))
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: there are several factors taken into account:
    1. The shortest path to a food.  A greedy search toward the dots: the
       shorter Pacman's distance, the higher the score.
       - negative - reciprocal - important
    2. Number of food left.  Obvious, so no further explanation.
       - negative - normal - in standard score
    3. The utility of the distance to a ghost.  A ghost two or more cells away
       is treated as safe, since it cannot eat Pacman within one or two steps,
       but a ghost one or two cells away is treated cautiously because it can.
       - negative - important
    4. Whether the game is already won or lost; this has the most weight.
       - depends - most important - in standard score
    5. When a ghost is safe (scared), keeping it far away is not a priority.
       - depends - not so important - in standard score
    6. States of the ghosts.  If Pacman can turn the ghosts white (scared),
       that is very desirable.
       - positive - not so important
    """
    if currentGameState.isWin() or currentGameState.isLose():
        return currentGameState.getScore()

    shortestPathProblem = searchAgents.AnyFoodSearchProblem(currentGameState)
    shortestPathLen = len(search.bfs(shortestPathProblem))  # first parameter

    foodLeft = currentGameState.getNumFood()  # second parameter

    ghostStates = currentGameState.getGhostStates()
    scaredTimes = [ghostState.scaredTimer for ghostState in ghostStates]
    position = currentGameState.getPacmanPosition()
    ghostThreat = 0  # the third parameter
    for ghost in ghostStates:
        if scaredTimes[ghostStates.index(ghost)] < 1:
            if util.manhattanDistance(ghost.configuration.pos, position) <= 1:
                if util.manhattanDistance(ghost.configuration.pos, position) == 0:
                    ghostThreat += -10000
                else:
                    ghostThreat += -300
            else:
                ghostThreat += 10.0 / util.manhattanDistance(ghost.configuration.pos, position)
        else:
            ghostThreat += 0

    newFood = currentGameState.getFood()
    foodAttraction = 0
    for i in range(newFood.width):
        for j in range(newFood.height):
            if newFood[i][j]:
                foodAttraction += 1.0 / math.pow(util.manhattanDistance((i, j), position), 2)

    totalScore = currentGameState.getScore() + 10 * 1.0 / shortestPathLen + ghostThreat + foodAttraction
    return totalScore
def on_click():
    '''
    This function defines the action of the 'Next' button.
    '''
    global algo, counter, next_button, romania_problem, start, goal
    romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
    if "Breadth-First Tree Search" == algo.get():
        node = breadth_first_tree_search(romania_problem)
        if node is not None:
            final_path = bfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Tree Search" == algo.get():
        node = depth_first_tree_search(romania_problem)
        if node is not None:
            final_path = dfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Breadth-First Search" == algo.get():
        node = breadth_first_search(romania_problem)
        if node is not None:
            final_path = bfs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Graph Search" == algo.get():
        node = depth_first_graph_search(romania_problem)
        if node is not None:
            final_path = dfgs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Uniform Cost Search" == algo.get():
        node = uniform_cost_search(romania_problem)
        if node is not None:
            final_path = ucs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "A* - Search" == algo.get():
        node = astar_search(romania_problem)
        if node is not None:
            final_path = asts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    return search.bfs(problem)  # each food is a goal, just find an optimal one
def cornersHeuristic(state, problem):
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls  # These are the walls of the maze, as a Grid (game.py)

    x, y = state[0]
    visitedCorners = state[1]
    hew = 0
    for corner in corners:
        if corner not in visitedCorners:
            prob = PositionSearchProblem(problem.startingGameState, start=(x, y),
                                         goal=corner, warn=False, visualize=False)
            hew = max(hew, len(search.bfs(prob)))
    return hew
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood().asList()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    closest_food = min(food, key=lambda f: mazeDistance(startPosition, f, gameState))
    prob = PositionSearchProblem(gameState, start=startPosition, goal=closest_food,
                                 warn=False, visualize=False)
    return search.bfs(prob)
def getAction(self, state):
    food = state.getFood()
    walls = state.getWalls()
    pos = state.getPacmanPosition()
    if len(self.actlist) > 0:
        return self.actlist.pop(0)
    if food[pos[0] - 1][pos[1] + 0]:
        if food[pos[0] + 0][pos[1] + 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] + 1))
        if food[pos[0] + 0][pos[1] - 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] - 1))
        if food[pos[0] + 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] + 1, pos[1] + 0))
        return 'West'
    if food[pos[0] + 0][pos[1] + 1]:
        if food[pos[0] + 0][pos[1] - 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] - 1))
        if food[pos[0] - 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] - 1, pos[1] + 0))
        if food[pos[0] + 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] + 1, pos[1] + 0))
        return 'North'
    if food[pos[0] + 0][pos[1] - 1]:
        if food[pos[0] + 0][pos[1] + 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] + 1))
        if food[pos[0] - 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] - 1, pos[1] + 0))
        if food[pos[0] + 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] + 1, pos[1] + 0))
        return 'South'
    if food[pos[0] + 1][pos[1] + 0]:
        if food[pos[0] + 0][pos[1] + 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] + 1))
        if food[pos[0] + 0][pos[1] - 1]:
            self.ignoreStack.append((pos[0] + 0, pos[1] - 1))
        if food[pos[0] - 1][pos[1] + 0]:
            self.ignoreStack.append((pos[0] - 1, pos[1] + 0))
        return 'East'
    while len(self.ignoreStack) > 0:
        oldpos = self.ignoreStack.pop(len(self.ignoreStack) - 1)
        if food[oldpos[0]][oldpos[1]] or food[oldpos[0] + 1][oldpos[1]] or food[oldpos[0] - 1][oldpos[1]] or food[oldpos[0]][oldpos[1] - 1] or food[oldpos[0]][oldpos[1] + 1]:
            prob = PositionSearchProblem(state, start=pos, goal=oldpos, warn=False)
            self.actlist = search.bfs(prob)
            return self.actlist.pop(0)
    return 'Stop'
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    # using bfs as a suboptimal search algorithm
    return search.bfs(problem)
def getAction(self, state):
    if self.pT == 0:
        print self.disTime
        self.pT = 1
    curPos = state.getPacmanPosition()
    if self.path[self.mark] == curPos:
        self.mark += 1
    nextpos = self.path[self.mark]
    prob = PositionSearchProblem(state, start=curPos, goal=nextpos,
                                 warn=False, visualize=False)
    move = search.bfs(prob)[0]
    self.cost += 1
    print self.cost
    print time.time() - self.starttime
    return move
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    # validDirections = bfs(problem)
    return bfs(problem)
def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    # minDist = 1000000
    # minPellet = (-1, -1)
    # for pellet in food.asList():
    #     if mazeDistance(startPosition, pellet, gameState) > minDist:
    #         minDist = mazeDistance(startPosition, pellet, gameState)
    #         minPellet = pellet
    # print gameState
    # print search.bfs(problem)
    return search.bfs(problem)
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    return search.bfs(problem)
    # (nearestFoodDistance, nearestFood) = min(
    #     [(util.manhattanDistance(startPosition, foodPos), foodPos) for foodPos in food.asList()])
    # print walls.asList()
    # print food.asList()
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()  # in grid notation / list of lists
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    # getSuccessors yields (successor, action, stepCost) where
    # successor = (x, y), action = String, stepCost = int
    ans = search.bfs(problem)
    return ans
def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    problem.startState = startPosition
    path = []
    while not problem.food.count() == 0:
        path.extend(search.bfs(problem))
        nextPosition = (startPosition[0] - path.count('West') + path.count('East'),
                        startPosition[1] + path.count('North') - path.count('South'))
        problem.food[nextPosition[0]][nextPosition[1]] = False
        problem.startState = nextPosition
    return path
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance (in steps) between any two points, using the
    search functions you have already built.  The gameState can be any state
    of the world -- Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This method may be useful for assignment 4.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                 warn=False, visualize=False)
    return len(search.bfs(prob))
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    cost = 1000000000
    for i in range(food.width):
        for j in range(food.height):
            if food[i][j]:
                path = search.bfs(problem)
                if len(path) < cost:
                    cost = len(path)
    return path
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search
    functions you have already built.  The gameState can be any game state --
    Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                 warn=False, visualize=False)
    return len(search.bfs(prob))
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    foodList = food.asList()
    distance = float('inf')
    closestFoodPos = startPosition
    # for foodPos in foodList:
    #     curPath = pathToDot(startPosition, foodPos, gameState)
    #     if distance > len(curPath):
    #         distance = len(curPath)
    #         closestFoodPath = curPath
    return search.bfs(problem)
def findPath2(self, state):
    from game import Directions
    s = Directions.SOUTH
    w = Directions.WEST
    n = Directions.NORTH
    e = Directions.EAST

    originPath = []
    foodMap = state.getFood()
    unvisited = foodMap.asList()
    curPos = state.getPacmanPosition()
    originPath.append(curPos)
    while len(unvisited) > 0:
        minDis = 999999
        minMD = 999999
        for pos in unvisited:
            t = util.manhattanDistance(curPos, pos)
            if t < minDis:
                tt = mazeDistance(curPos, pos, state)
                if tt < minMD:
                    minDis = t
                    minMD = tt
                    nextpos = pos
        prob = PositionSearchProblem(state, start=curPos, goal=nextpos,
                                     warn=False, visualize=False)
        move = search.bfs(prob)[0]
        x, y = curPos
        if move == s:
            y -= 1
        if move == w:
            x -= 1
        if move == n:
            y += 1
        if move == e:
            x += 1
        curPos = (x, y)
        if curPos in unvisited:
            unvisited.remove(curPos)
            originPath.append(curPos)
    return originPath
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    x, y = startPosition
    path = []
    mind = 99999
    mfx = 99999
    mfy = 99999
    for idx, fx in enumerate(food):
        for idy, fy in enumerate(fx):
            if food[idx][idy]:
                md = mazeDistance((x, y), (idx, idy), gameState)
                if md < mind:
                    mfx = idx
                    mfy = idy
                    mind = md
    prob = PositionSearchProblem(gameState, start=(x, y), goal=(mfx, mfy),
                                 warn=False, visualize=False)
    return search.bfs(prob)
def tick(self):
    if self.state and self.state.mode == LightState.RUNNING:
        p_loc = (self.state.pacman.x, self.state.pacman.y)
        # update game state
        if self.grid[p_loc[0]][p_loc[1]] in [o, O]:
            self.grid[p_loc[0]][p_loc[1]] = e
        path = bfs(self.grid, p_loc, self.state, [o, O])
        print(path)
        if path != None:
            next_loc = path[1]
            # Figure out the position we need to move to
            new_msg = PacmanCommand()
            new_msg.dir = self._get_direction(p_loc, next_loc)
            self.write(new_msg.SerializeToString(), MsgType.PACMAN_COMMAND)
            return
    new_msg = PacmanCommand()
    new_msg.dir = PacmanCommand.STOP
    self.write(new_msg.SerializeToString(), MsgType.PACMAN_COMMAND)
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: There are three main evaluated aspects of the game state:

    Current Score:  The only way that changes in state as a result of actions
                    can be accounted for, e.g. eating a ghost, eating food.
    Food Distance:  The inverse distance to the nearest food, found using bfs.
                    If there is food nearby, we should go for it.
    Enemy Distance: The inverse distance to each enemy.  Only non-zero if the
                    ghost is scared, to prioritize going for it.
    """
    pacman_pos = currentGameState.getPacmanPosition()
    food = currentGameState.getFood()
    ghost_states = currentGameState.getGhostStates()
    scared_times = [g.scaredTimer for g in ghost_states]

    anyfood = searchAgents.AnyFoodSearchProblem(currentGameState)
    food_distance = search.bfs(anyfood)
    food_distance = 1 / len(food_distance) if food_distance else 0

    enemy_dist = [manhattanDistance(pacman_pos, g.configuration.pos) for g in ghost_states]
    for i, d in enumerate(enemy_dist):
        d = d if d != 0 else 0.00001
        d = 1 / d if d < 5 or scared_times[i] != 0 else 0
        d = 0 if scared_times[i] == 0 else -d
        enemy_dist[i] = d
    enemy_dist = sum(enemy_dist)

    score = currentGameState.getScore()
    return score + 0.1 * food_distance - enemy_dist
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to
    come up with an admissible heuristic; almost all admissible heuristics
    will be consistent as well.

    If using A* ever finds a solution that is worse than what uniform cost
    search finds, your heuristic is *not* consistent, and probably not
    admissible!  On the other hand, inadmissible or inconsistent heuristics
    may find optimal solutions, so be careful.

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid is a Grid
    (see game.py) of either True or False.  You can call foodGrid.asList() to
    get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the
    problem.  For example, problem.walls gives you a Grid of where the walls
    are.

    If you want to *store* information to be reused in other calls to the
    heuristic, there is a dictionary called problem.heuristicInfo that you can
    use.  For example, if you only want to count the walls once and store that
    value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access
    problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    # We initialize the list with 0 so there are no negative values
    # in case there is no food left.
    distances = [0]
    for menjar in foodGrid.asList():
        prob = PositionSearchProblem(problem.startingGameState, start=position,
                                     goal=menjar, warn=False, visualize=False)
        distances.append(len(search.bfs(prob)))
    # Return the longest path
    return max(distances)
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    unvisited = util.Queue()
    unvisited.push(startPosition)
    dotReached = []
    visited = []
    while not unvisited.isEmpty():
        nextPosition = unvisited.pop()
        if problem.isGoalState(nextPosition):
            dotReached.append(nextPosition)
        else:
            successors = problem.getSuccessors(nextPosition)
            for successor in successors:
                if not successor[0] in visited:
                    unvisited.push(successor[0])
        visited.append(nextPosition)
    return min([search.bfs(PositionSearchProblem(gameState, start=startPosition,
                                                 goal=dot, warn=False,
                                                 visualize=False))
                for dot in dotReached], key=len)
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    if problem.closestFood == None:
        closestFood = self.findClosestDot(gameState)
        problem.closestFood = closestFood
    if problem.closestFood != None:
        moves = search.bfs(problem)
        if bool(moves):
            return moves
    self.closestFood = None
    return []
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    food_List = food.asList()
    closest_dot = food_List[0]
    for dot in food.asList()[1:]:
        if util.manhattanDistance(startPosition, dot) < util.manhattanDistance(startPosition, closest_dot):
            closest_dot = dot
    prob = AnyFoodSearchProblem(gameState)
    return search.bfs(prob)
def do_algorithm(self):
    if self.treasure_set:  # Can't find path to null position.
        if self.algorithm == "dfs":
            self.opponent_path = search.dfs(self.maze_grid, self.opponent_start_pos, self.treasure_pos)
        elif self.algorithm == "bfs":
            self.opponent_path = search.bfs(self.maze_grid, self.opponent_start_pos, self.treasure_pos)
        elif self.algorithm == "a_star":
            self.opponent_path = search.a_star(self.maze_grid, self.opponent_start_pos, self.treasure_pos)
        # Path is found
        if self.opponent_path is not None:
            self.path_started = True
            # Already started so disable key.
            # You might be surprised how important this is!
            self.screen.onkey(None, "s")
            self.start_or_continue_path()
        else:
            self.reset()  # Goal unreachable.
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    path = []
    x, y = startPosition
    closeX, closeY = 100000, 100000
    distance = 100000
    for foodSpot in food.asList():
        foodX, foodY = foodSpot
        tmp = mazeDistance((x, y), (foodX, foodY), gameState)
        if tmp < distance:
            distance = tmp
            closeX = foodX
            closeY = foodY
    prob = PositionSearchProblem(gameState, start=startPosition, goal=(closeX, closeY),
                                 warn=False, visualize=False)
    return search.bfs(prob)
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()

    # The code below finds the closest dot given Pacman's position.
    x1, y1 = startPosition
    foodlist = food.asList()
    # For each food position, find the distance from the current state.
    distance = [mazeDistance((x1, y1), (x2, y2), gameState) for x2, y2 in foodlist]
    # Find the closest dot (minimum distance) and get its coordinates.
    goal = foodlist[distance.index(min(distance))]
    # Run a breadth-first search from the current position to the nearest goal and return the BFS path.
    prob = PositionSearchProblem(gameState, start=startPosition, goal=goal,
                                 warn=False, visualize=False)
    return search.bfs(prob)
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    foodCoordinates = food.asList()
    distClosest = 100000
    foodClosest = None
    for food in foodCoordinates:
        dist = ((startPosition[0] - food[0]) ** 2 + (startPosition[1] - food[1]) ** 2) ** 0.5
        if dist < distClosest:
            distClosest = dist
            foodClosest = food
    problem.goal = foodClosest
    return search.bfs(problem)
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

    state:   The current search state (a data structure you chose in your
             search problem)
    problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound on the
    shortest path from the state to a goal of the problem; i.e., it should be
    admissible.
    """
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls  # These are the walls of the maze, as a Grid (game.py)

    visitedCorners = state[1]
    distance = []
    if problem.isGoalState(state):
        return 0
    # Compute a blended distance estimate to every corner that has not been
    # visited yet, and return the largest one.
    for i in corners:
        if i not in visitedCorners:
            prob = PositionSearchProblem(problem.startingGameState, start=state[0],
                                         goal=i, warn=False, visualize=False)
            distance.append((len(search.bfs(prob)) * 100 +
                             manhattanHeuristic_mod(state[0], i) +
                             euclideanHeuristic_mod(state[0], i)) / 102)
    return max(distance)
def on_click():
    """
    This function defines the action of the 'Next' button.
    """
    global algo, counter, next_button, romania_problem, start, goal
    romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
    if "Breadth-First Tree Search" == algo.get():
        node = breadth_first_tree_search(romania_problem)
        if node is not None:
            final_path = bfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
    elif "Depth-First Tree Search" == algo.get():
        node = depth_first_tree_search(romania_problem)
        if node is not None:
            final_path = dfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
    elif "Breadth-First Graph Search" == algo.get():
        node = breadth_first_graph_search(romania_problem)
        if node is not None:
            final_path = bfs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
    elif "Depth-First Graph Search" == algo.get():
        node = depth_first_graph_search(romania_problem)
        if node is not None:
            final_path = dfgs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
    elif "Uniform Cost Search" == algo.get():
        node = uniform_cost_search(romania_problem)
        if node is not None:
            final_path = ucs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
    elif "A* - Search" == algo.get():
        node = astar_search(romania_problem)
        if node is not None:
            final_path = asts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
            mark_target(node)
            draw_tree()
        counter += 1
        [(1, 5), (1, 6), (2, 5)],
        [(2, 6), (3, 5), (3, 6)],
        [(3, 1), (3, 2), (4, 1), (4, 2)],
        [(5, 1), (6, 1), (6, 2)],
        [(5, 2), (5, 3), (6, 3)],
        [(5, 4), (4, 5), (5, 5), (6, 5)],
        [(4, 6), (5, 6), (6, 6), (5, 7)],
        [(5, 8), (6, 8), (6, 7)],
        [(7, 3), (8, 3), (8, 4)],
        [(7, 4), (7, 5), (8, 5)],
        [(7, 6), (7, 7), (8, 6)],
    ]
    return Layton(board=board, shapes=shapes,
                  current=[(3, 1), (3, 2), (4, 1), (4, 2)],
                  goal=[(1, 5), (1, 6), (2, 5), (2, 6)])


if __name__ == '__main__':
    start = gen_start()
    # Each of these takes 20-30 mins.
    # Use the Java version, which is the exact port
    # but finishes in <3 minutes.
    goal, cost, iters = bfs(start, debug=True)
    print('bfs2-cost: %d' % cost)
    print('bfs2-pops: %d' % iters)
    goal, cost, iters = astar(start, debug=True)
    print('astar2-cost: %d' % cost)
    print('astar2-pops: %d' % iters)
def _find_distance_of_closest_pellet(self, target_loc):
    return len(bfs(self.grid, target_loc, [o])) - 1
def findPathToClosestDot(self, gameState):
    problem = AnyFoodSearchProblem(gameState, self.index)
    actions = search.bfs(problem)
    return problem.lastTargetFood, actions
def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    problem = AnyFoodSearchProblem(gameState)
    return search.bfs(problem)
from mynode import MyNode
from item import INITIAL, DIRECTION, POINT_TABLE
from time import sleep, time
from sys import argv
# Needed for board.Board and np.copy below (assumed module names, inferred from usage).
import board
import numpy as np

# ------------------------------------------ main -----------------------------------------
if __name__ == "__main__":
    # SETUP
    game_board = board.Board(DIRECTION, INITIAL)
    initial_board = np.copy(game_board.board_array)
    initial_node = MyNode(initial_board, None, 0, 32)
    # argument assignment
    method = int(argv[1])
    TIME_LIMIT = int(argv[2])
    if method == 1:
        result_1 = bfs(initial_node, POINT_TABLE, TIME_LIMIT)
    elif method == 2:
        result_2 = dfs(initial_node, POINT_TABLE, TIME_LIMIT)
    elif method == 3:
        result_3 = ids(initial_node, POINT_TABLE, TIME_LIMIT)
    elif method == 4:
        result_4 = dfs_rand(initial_node, POINT_TABLE, TIME_LIMIT)
    elif method == 5:
        result_5 = dfs_spec(initial_node, TIME_LIMIT)
    else:
        print("\n1-) Breadth First Search\n2-) Depth First Search\n3-) Iterative Deepening Search\n4-) Depth First with Random")
        print("5-) Depth First with Heuristic")
def _compute_moves(self, pt1, pt2):
    """
    Returns the moves needed to move from pt1 to pt2 on this PacMan map.
    """
    return search.bfs(PacManMapDistanceProblem(self.walls, pt1, pt2))
import InterfazGrafica
import search
import utilities

print("----------------Breadth-first search---------------")
utilities.init(search.bfs)
print("----------------Uniform cost------------")
utilities.init(search.costo_uniforme)
print("-----------------A*----------------------")
utilities.init(search.a)

InterfazGrafica.viajeChihiro(search.bfs().calculate_nodo(), "Breadth-first search")
InterfazGrafica.viajeChihiro(search.costo_uniforme().calculate_nodo(), "Uniform cost")
InterfazGrafica.viajeChihiro(search.a().calculate_nodo(), "A*")
# searchAgents.py
def bfsSearch(position, food):
    # Note: `problem` is taken from the enclosing scope (the food search problem being solved).
    return len(search.bfs(PositionSearchProblem(problem.startingGameState, goal=food,
                                                start=position, warn=False,
                                                visualize=False)))
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    "*** YOUR CODE HERE ***"
    if self.moveIndex < len(self.moveList):
        result = self.moveList[self.moveIndex]
        self.moveIndex += 1
        print("ACTION: {}".format(result))
        return result
    else:
        currX, currY = state.getPacmanPosition()
        foodGrid = state.getFood()
        bestDist = float('inf')
        bestCoord = None
        walls = state.getWalls()

        # adjacent squares, favor moving to one side (bottom left)
        if isValid(currX - 1, currY, walls, foodGrid):
            currDist = dist(currX, currY, currX - 1, currY)
            if currDist < bestDist:
                bestDist = currDist
                bestCoord = (currX - 1, currY)
        if isValid(currX, currY - 1, walls, foodGrid):
            currDist = dist(currX, currY - 1, currX, currY)
            if currDist < bestDist:
                bestDist = currDist
                bestCoord = (currX, currY - 1)
        if isValid(currX - 1, currY - 1, walls, foodGrid):
            currDist = dist(currX, currY - 1, currX - 1, currY - 1)
            if currDist < bestDist:
                bestDist = currDist
                bestCoord = (currX - 1, currY - 1)

        # 2 layers away
        if isValid(currX - 2, currY, walls, foodGrid):
            currDist = mazeDistance(state.getPacmanPosition(), (currX - 2, currY), state)
            if currDist < bestDist:
                bestDist = currDist
                bestCoord = (currX - 2, currY)
        if isValid(currX, currY - 2, walls, foodGrid):
            currDist = mazeDistance(state.getPacmanPosition(), (currX, currY - 2), state)
            if currDist < bestDist:
                bestDist = currDist
                bestCoord = (currX, currY - 2)
        if True:
            if isValid(currX - 2, currY - 1, walls, foodGrid):
                currDist = mazeDistance(state.getPacmanPosition(), (currX - 2, currY - 1), state)
                if currDist < bestDist:
                    bestDist = currDist
                    bestCoord = (currX - 2, currY - 1)
            if isValid(currX - 1, currY - 2, walls, foodGrid):
                currDist = mazeDistance(state.getPacmanPosition(), (currX - 1, currY - 2), state)
                if currDist < bestDist:
                    bestDist = currDist
                    bestCoord = (currX - 1, currY - 2)
            if isValid(currX - 2, currY - 2, walls, foodGrid):
                currDist = mazeDistance(state.getPacmanPosition(), (currX - 2, currY - 2), state)
                if currDist < bestDist:
                    bestDist = currDist
                    bestCoord = (currX - 2, currY - 2)

        if bestCoord is not None:
            prob = PositionSearchProblem(state, start=state.getPacmanPosition(), goal=bestCoord)
            self.moveList.extend(search.bfs(prob))
        else:
            # brute force, find the best element
            for x, row in enumerate(foodGrid):
                for y, cell in enumerate(row):
                    if foodGrid[x][y]:
                        score = mazeDistance(state.getPacmanPosition(), (x, y), state)
                        if score < bestDist:
                            bestDist = score
                            bestCoord = (x, y)
            prob = PositionSearchProblem(state, start=state.getPacmanPosition(), goal=bestCoord)
            self.moveList.extend(search.bfs(prob))
        result = self.moveList[self.moveIndex]
        self.moveIndex += 1
        return result