def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # AnyFoodSearchProblem treats every remaining dot as a goal, so plain
    # A* (with the default null heuristic) returns a shortest path to the
    # nearest one.  (Removed: a large block of commented-out maze-distance
    # code and the unused startPosition/food/walls locals.)
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
def foodHeuristic(state, problem):
    """A* food heuristic: the largest true maze distance from Pacman to
    any remaining dot.

    state: a (pacmanPosition, foodGrid) tuple.
    problem: the FoodSearchProblem; problem.startingGameState is used to
        build an exact per-dot position search.
    Returns 0 when no food remains (goal states must score 0).
    """
    position, foodGrid = state
    # Exact maze distance to each dot via an A* position search.
    dot_distances = [
        len(search.aStarSearch(
            PositionSearchProblem(problem.startingGameState, start=position,
                                  goal=food, warn=False, visualize=False),
            manhattanHeuristic))
        for food in foodGrid.asList()
    ]
    return max(dot_distances) if dot_distances else 0
def pathToClosestFood(self, gameState, cRegion, fRegion): problem = ApproximateSearchProblem(gameState, close_region=cRegion, far_region=fRegion) "*** YOUR CODE HERE ***" action = search.aStarSearch(problem, ApproximateHeuristic) print "Actions: " + str(action) return action
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    # NOTE(review): aStarSearch returns the full list of actions, while the
    # docstring promises a single action -- confirm the caller expects a list.
    return search.aStarSearch(self.prob, foodHeuristic)
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    # Replay a previously computed plan one action at a time.
    if len(self.answer) > 0:
        answer = self.answer[0]
        self.answer = self.answer[1:]
        return answer
    else:
        # Plan exhausted: flag that the (expensive) full A* phase may start.
        self.time = 1
    # With 20 or fewer dots left, solve the full FoodSearchProblem optimally.
    if state.getFood().count() <= 20 and self.time == 1:
        problem = FoodSearchProblem(state)
        self.answer = search.aStarSearch(problem, foodHeuristic)
        answer = self.answer[0]
        self.answer = self.answer[1:]
        return answer
    # Otherwise greedily head for the closest dot with BFS.
    problem = AnyFoodSearchProblem(state)
    self.answer = search.bfs(problem)
    answer = self.answer[0]
    self.answer = self.answer[1:]
    return answer
def getAction(self, state):
    """
    From game.py: receive a GameState, return one action from
    Directions.{North, South, East, West, Stop}.

    Replans with A* on the current food state every turn and takes only
    the first step of the resulting plan.
    """
    plan = search.aStarSearch(FoodSearchProblem(state), approxHeuristic)
    return plan[0]
def findPathToClosestDot(self, gameState):
    """Return a path (a list of actions) from gameState to the closest dot."""
    # AnyFoodSearchProblem accepts any remaining dot as a goal, so an
    # uninformed A* finds the nearest one.
    any_food = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(any_food)
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    # NOTE(review): this returns the whole A* action list rather than a
    # single Directions action -- confirm the caller expects a list.
    food_problem = FoodSearchProblem(state)
    return search.aStarSearch(food_problem)
def registerInitialState(self, state):
    "Plan before any moves: solve the CornersProblem with A* and cache it."
    self.answer = search.aStarSearch(CornersProblem(state), cornersHeuristic)
    self.secondAnswer = []
    self.time = 0
    self.initialFoodCount = state.getFood().count()
def test_tetris(ntrial=10, lookahead=1, heuristic=evaluate_state, watchGames=False, verbose=False):
    """Test harness: play ntrial Tetris games with A* and the given
    heuristic, counting and logging the lines cleared per game."""
    if lookahead < 1:
        print "Bad Lookahead! Please pick 1 for no lookahead, 2 for 1-piece, etc..."
        return
    else:
        print "Lookahead: " + str(lookahead - 1) + " pieces"
    if verbose:
        print "Verbose Printing Enabled"
    else:
        print "Verbose Printing Disabled"
    if watchGames:
        print "Game Replay Enabled"
    else:
        print "Game Replay Disabled"
    total_lines = []
    for i in range(ntrial):
        problem = TetrisSearchProblem(lookahead=lookahead, verbose=verbose)
        current_node = None
        # Game loop: keep playing the game until all of the pieces are done
        while current_node is None or len(current_node["pieces"]) > 0:
            game_replay, goal_node = search.aStarSearch(problem, heuristic)
            current_node = goal_node
            if watchGames:
                # Animate the recorded game frame by frame.
                for grid in game_replay:
                    print_grid(grid)
                    sleep(0.2)
                sleep(2)
            # Count cleared lines by watching the stack height drop
            # between consecutive frames.
            lines_cleared = 0
            for j in range(len(game_replay) - 1):
                before = max(get_height_list(game_replay[j]))
                after = max(get_height_list(game_replay[j + 1]))
                if after < before:
                    lines_cleared += before - after
            print "Lines cleared: " + str(lines_cleared)
            # Log every frame of this trial to a replay file.
            with open('gameLogs/trial_3' + str(i) + '_linesCleared=' + str(lines_cleared) + '.txt', 'w') as fout:
                for g in game_replay:
                    fout.write(str(g))
                    fout.write('\n')
            break  # return  # TODO: remove once we have a real goal state
        total_lines.append(lines_cleared)
    print "Lines by Game: " + str(total_lines)
    print "Total Lines: " + str(sum(total_lines)) + " in " + str(ntrial) + " games."
def chooseAction(self, gameState):
    """Offensive agent: keep eating food until a threshold of it is
    carried, then head home; replans with A* every turn."""
    currObs = self.getCurrentObservation()
    self.isPacman = currObs.getAgentState(self.index).isPacman
    opponents = self.getOpponents(currObs)
    # Positions of opponents (None entries for agents out of sight range).
    self.visibleAgents = []
    for x in opponents:
        self.visibleAgents += [currObs.getAgentPosition(x)]
    food = self.getFood(currObs)
    capsules = self.getCapsules(currObs)
    foodList = food.asList(True)
    foodList += capsules  # capsules count as edible targets too
    defendedFood = self.getFoodYouAreDefending(currObs).asList(True)
    mypos = gameState.getAgentState(self.index).getPosition()
    # check and initialise a few variables only at the start of the game
    if self.first:
        self.allFood = len(foodList)
        self.first = False
        self.width = currObs.getWalls().width
        self.height = currObs.getWalls().height
        self.isRed = currObs.isOnRedTeam(self.index)
    # goal = random.choice(food.asList(True))
    self.foodLeft = len(foodList)
    self.foodEaten = self.allFood - self.foodLeft
    # CHOOSE GOAL here: eat / return home / resume eating.
    treshHold = self.foodLeft / 3
    # treshHold = 4
    if self.foodEaten <= treshHold:
        # while foodEaten is below the threshold keep eating
        goal = self.closest(foodList, mypos)
    elif self.isPacman:
        # carrying enough: return food home
        # goal = self.closest(currObs,defendedFood,mypos)
        goal = self.getClosestGoal(currObs, mypos)
    else:
        # after touching base, return to eat more food
        self.allFood -= self.foodEaten
        self.foodEaten = 0
        goal = self.closest(foodList, mypos)
    # goal = random.choice(food.asList(True))
    afsp = searchAgents.AnyFoodSearchProblem(currObs, self.index, food, goal,
                                             self.visibleAgents, opponents,
                                             self.getMazeDistance)
    self.a = search.aStarSearch(afsp, searchAgents.manhattanHeuristic)
    action = None
    # Fall back to a random legal action if the search produced no plan.
    if len(self.a) != 0:
        action = self.a.pop(0)
    else:
        action = random.choice(gameState.getLegalActions(self.index))
    return action
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # AnyFoodSearchProblem treats any remaining dot as a goal, so plain A*
    # yields a shortest path to the nearest one.  (Removed: commented-out
    # util.raiseNotDefined() and the unused startPosition/food/walls locals.)
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
def getAction(self, gameState):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}
    """
    # NOTE(review): this returns the entire A* action list, not a single
    # action as the docstring says -- confirm the caller expects a list.
    # (Removed: unused startPosition/food/walls locals.)
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
def registerInitialState(self, state):
    """Greedy closest-dot agent: repeatedly plan to the nearest dot until
    the board is empty, concatenating the path segments into self.actions."""
    # Full-board search machinery used by the base SearchAgent class.
    self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
    self.searchType = FoodSearchProblem
    self.counter = 0
    self.actions = []
    currentState = state
    while (currentState.getFood().count() > 0):
        nextPathSegment = self.findPathToClosestDot(currentState)  # The missing piece
        self.actions += nextPathSegment
        # Validate each step is legal before simulating it forward.
        for action in nextPathSegment:
            legal = currentState.getLegalActions()
            if action not in legal:
                t = (str(action), str(currentState))
                raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
            currentState = currentState.generateSuccessor(0, action)
    self.actionIndex = 0
def find_tetris(problem):
    """ Continues until we find a tetris """
    current_node = None
    # Game loop: keep playing the game until all of the pieces are done
    while current_node is None or len(current_node["pieces"]) > 0:
        game_replay, goal_node = search.aStarSearch(problem, heuristic=evaluate_state)
        current_node = goal_node
        # Animate the solved game, one frame per second.
        for grid in game_replay:
            print_grid(grid)
            print
            sleep(1)
        return  # TODO: remove once we have a real goal state
def mazeDistanceAStar(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search
    functions you have already built. The gameState can be any game state --
    Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Bug fix: point1 is a tuple, so it must be str()-ed before string
    # concatenation -- the original raised TypeError instead of the
    # intended AssertionError message (the point2 assert already did this).
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.aStarSearch(prob, manhattanHeuristic))
def chooseAction(self,gameState): currObs = self.getCurrentObservation() self.position = currObs.getAgentPosition(self.index) opponents = self.getOpponents(currObs) defendedFood = self.getFoodYouAreDefending(currObs).asList(True) # if not enemies around go around # for i in opponents: # if not currObs.getAgentPosition(i): # continue for i in opponents: self.visibleAgents += [currObs.getAgentPosition(i)] if self.visibleAgents[0] == None and self.visibleAgents[1] == None: # wander around # closestFood = self.closest(defendedFood,self.position) randomFood = random.choice(defendedFood) # defendingProblem = searchAgents.AnyFoodSearchProblem(currObs, self.index , defendedFood, closestFood,self.visibleAgents,opponents) # posProb = PositionSearchProblem(currObs, costFn = lambda x: 1, closestFood, start=None, warn=True, visualize=True) print "mypos, closestFood", currObs.getAgentPosition(self.index)," " defendingProblem = PositionSearchProblem(currObs, self.index, closestFood) # goal is to kill pacman else: # opponentsDist=[] # for i in self.visibleAgents: # if i is None: # continue print "lito ",self.visibleAgents, self.position closestOpponent = self.closest(self.visibleAgents, self.position) # self.manhattanDist(self.position, i) # print "adfas", self.position, i # closestOpponent = # defendingProblem = searchAgents.AnyFoodSearchProblem(currObs, self.index , defendedFood, closestOpponent, self.visibleAgents,opponents) # defendingProblem = DefendingProblem(currObs, guardIndex, defendedFood) # posProb = PositionSearchProblem(currObs, closestOpponent) defendingProblem = PositionSearchProblem(currObs, self.index, closestOpponent) actions = search.aStarSearch(defendingProblem,searchAgents.manhattanHeuristic) return actions[0]
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # (Removed: the entire setup -- startPosition/food/walls and the
    # problem construction -- was duplicated verbatim; walls was unused.)
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    # Point the any-food problem at the dot chosen by helper() so the
    # manhattan heuristic has a concrete goal to aim for.
    problem = AnyFoodSearchProblem(gameState)
    problem.goal = helper(startPosition, food.asList())
    actions = search.aStarSearch(problem, manhattanHeuristic)
    return actions
def registerInitialState(self, state):
    """
    This is the first time that the agent sees the layout of the game
    board. Here, we choose a path to the goal. In this phase, the agent
    should compute the path to the goal and store it in a local variable.

    state: a GameState object (pacman.py)
    """
    if self.searchFunction == None:
        import sys
        print "No search function provided for SearchAgent"
        sys.exit(1)
    # If you wrap your solution in the timing code provided, you'll know how long the pathfinding takes.
    starttime = time.time()
    # NOTE(review): this unconditionally overwrites self.searchFunction,
    # making the None-check above only an existence guard -- confirm intended.
    self.searchFunction = lambda x: search.aStarSearch(x, getFoodHeuristic(state))
    problem = self.searchType(state)
    self.actions = deque(self.searchFunction(problem))
    print 'Path found with total cost of %d in %.1f seconds' % (problem.getCostOfActions(self.actions), time.time() - starttime)
def test2():
    """Timing test: for every piece type, enumerate valid placements on a
    fixed 20x10 board and A*-search a movement path to each placement."""
    import time
    t0 = time.time()
    total = 0
    for piece in PIECES:
        # Fixed test board: one garbage row at the top, a jagged stack of
        # 2-cells at the bottom.
        b = Board(data='0,0,0,1,1,1,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0')
        base_piece = Piece(piece, right_rotations=0)
        res = b.get_valid_positions(piece)
        total += len(res)
        for el in res:
            # Fresh copy of the same board for each candidate placement.
            b = Board(data='0,0,0,1,1,1,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0')
            b.place_piece(el[0], el[1], el[2])
            start_loc = (3, -1)  # spawn location above the board
            problem = BoardSearchProblem(b, el[0]._type, el[0].right_rotations, (el[1], el[2]), base_piece.right_rotations, start_loc)
            path = aStarSearch(problem, boardHeuristic)
            print b
            print path
    print time.time() - t0
    print total
def get_path(self, start_piece, start_loc, end_piece, end_loc):
    """Return the forward move sequence taking start_piece from start_loc
    to end_loc, derived by searching backwards and inverting each move."""
    piece_type = start_piece._type
    start_rot = start_piece.right_rotations
    end_rot = end_piece.right_rotations
    problem = BoardSearchProblem(self, piece_type, end_rot, end_loc, start_rot, start_loc)
    backwards_path = aStarSearch(problem, boardHeuristic)
    if backwards_path == 'Error':
        return []
    # Inverse of each backward move.  NOTE(review): there is no 'down'
    # key, so a 'down' in backwards_path would raise KeyError -- confirm
    # the backward problem only ever moves the piece upward/sideways.
    path_map = {'up': 'down', 'left': 'right', 'right': 'left',
                'turnleft': 'turnright', 'turnright': 'turnleft'}
    path = []
    for action in backwards_path[::-1]:
        path.append(path_map[action])
    # Trailing 'down' moves are redundant -- replace them with a hard drop.
    while path and path[-1] == 'down':
        path.pop()
    path.append('drop')
    return path
def foodHeuristic(state, problem):
    """FoodSearchProblem heuristic.

    Caches in problem.heuristicInfo: the initial food coordinates,
    run-length encodings of the vertical and horizontal walls, and the
    result of a one-off FoodSubSearchProblem.  Per call, it removes
    already-eaten food from that cached result and prices the remaining
    tour with getCostOfActions.
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    # Initial food coordinates, computed once and cached.
    if 'foodCordinates' not in problem.heuristicInfo:
        foodCordinates = []
        for i in range(foodGrid.width):
            for j in range(foodGrid.height):
                if foodGrid[i][j] is True:
                    foodCordinates.append((i, j))
        problem.heuristicInfo['foodCordinates'] = foodCordinates
    # Run-length encode each column's wall segments, once.
    if 'verticalWalls' not in problem.heuristicInfo:
        verticalWalls = []
        for i in range(problem.walls.width):
            start = -1
            finish = -1
            verticalWalls.append([])
            for j in range(problem.walls.height):
                if problem.walls[i][j] is True:
                    if start == -1:
                        start = j
                    finish += 1
                elif finish > -1:
                    # Wall run ended: record it and reset.
                    verticalWalls[i].append(VerticalWall(start, start + finish))
                    start = -1
                    finish = -1
            # Flush a run that reaches the top edge.
            # NOTE(review): if the column ends with no open run this appends
            # VerticalWall(-1, -2) -- presumably benign because maze borders
            # are walls; confirm.
            verticalWalls[i].append(VerticalWall(start, start + finish))
        problem.heuristicInfo['verticalWalls'] = verticalWalls
    # Same run-length encoding for each row, once.
    if 'horizontalWalls' not in problem.heuristicInfo:
        horizontalWalls = []
        for i in range(problem.walls.height):
            start = -1
            finish = -1
            horizontalWalls.append([])
            for j in range(problem.walls.width):
                if problem.walls[j][i] is True:
                    if start == -1:
                        start = j
                    finish += 1
                elif finish > -1:
                    horizontalWalls[i].append(
                        HorizontalWall(start, start + finish))
                    start = -1
                    finish = -1
            horizontalWalls[i].append(HorizontalWall(start, start + finish))
        problem.heuristicInfo['horizontalWalls'] = horizontalWalls
    # Remove eated food from foodCordinates
    verticalWalls = problem.heuristicInfo['verticalWalls']
    horizontalWalls = problem.heuristicInfo['horizontalWalls']
    foodCordinates = problem.heuristicInfo['foodCordinates'][:]
    eatedFood = []
    for food in foodCordinates:
        if foodGrid[food[0]][food[1]] is False:
            eatedFood.append(food)
    for food in eatedFood:
        foodCordinates.remove(food)
    if foodCordinates is None or len(foodCordinates) == 0:
        return 0
    # Solve the relaxed sub-problem once and cache its food ordering.
    if 'heuristicProblem' not in problem.heuristicInfo:
        foodToRemove = FoodSurrounded.foodToRemove(problem.walls, foodCordinates[:])
        foodToFind = foodCordinates[:]
        for f in foodToRemove:
            if f in foodToFind:
                foodToFind.remove(f)
        heuristicProblem = FoodSubSearchProblem(position, foodToFind, verticalWalls, horizontalWalls)
        heuristicProblem.result = aStarSearch(heuristicProblem, lambda node, problem: len(node.leftFood))
        print 'Expanded', heuristicProblem._expanded
        problem.heuristicInfo['heuristicProblem'] = heuristicProblem
    # Price the cached tour minus any food eaten since it was computed.
    heuristicProblem = problem.heuristicInfo['heuristicProblem']
    heuristicProblemResult = heuristicProblem.result[:]
    for eated in eatedFood:
        if eated in heuristicProblemResult:
            heuristicProblemResult.remove(eated)
    result = heuristicProblem.getCostOfActions(position, heuristicProblemResult)
    return result
def closestFoodMazeDist(gameState):
    """Maze distance from Pacman to the nearest remaining dot."""
    from search import aStarSearch
    shortest_path = aStarSearch(AnyFoodSearchProblem(gameState))
    return len(shortest_path)
def __init__(self, heuristic):
    """Configure the agent to run A* with the supplied heuristic on
    PositionSearchProblem instances."""
    self.searchFunction = lambda prob: search.aStarSearch(prob, heuristic)
    # Bug fix: the original assigned a bare local 'searchType' that was
    # immediately discarded; the agent machinery (see sibling __init__s)
    # reads self.searchType.
    self.searchType = PositionSearchProblem
import time

# Command-line driver: PlanningProblem.py [domainName problemName heuristic]
# where heuristic is one of 'max', 'sum' or 'zero'.
if len(sys.argv) != 1 and len(sys.argv) != 4:
    print("Usage: PlanningProblem.py domainName problemName heuristicName(max, sum or zero)")
    exit()
# Defaults used when run with no arguments.
domain = 'dwrDomain.txt'
problem = 'dwrProblem.txt'
heuristic = lambda x, y: 0
if len(sys.argv) == 4:
    domain = str(sys.argv[1])
    problem = str(sys.argv[2])
    if str(sys.argv[3]) == 'max':
        heuristic = maxLevel
    elif str(sys.argv[3]) == 'sum':
        heuristic = levelSum
    elif str(sys.argv[3]) == 'zero':
        heuristic = lambda x, y: 0
    else:
        print("Usage: PlanningProblem.py domainName problemName heuristicName(max, sum or zero)")
        exit()
prob = PlanningProblem(domain, problem)
# NOTE(review): time.clock() is deprecated (removed in Python 3.8) and has
# platform-dependent semantics -- consider time.process_time()/perf_counter().
start = time.clock()
plan = aStarSearch(prob, heuristic)
elapsed = time.clock() - start
if plan is not None:
    print("Plan found with %d actions in %.2f seconds" % (len(plan), elapsed))
else:
    print("Could not find a plan in %.2f seconds" % elapsed)
print("Search nodes expanded: %d" % prob._expanded)
def runTest(self):
    """Run each uninformed search plus A* on this problem and print the paths."""
    print "Path result for DFS:", search.depthFirstSearch(self)
    print "Path result for BFS:", search.breadthFirstSearch(self)
    print "Path result for UCS:", search.uniformCostSearch(self)
    #print "Path result for A*:",search.aStarSearch(self,search.nullHeuristic)
    print "Path result for A* with letter heuristic:", search.aStarSearch(self, letterHeuristic)
def __init__(self, prob, heur):
    """Precompute the A* solution for prob at construction time.

    NOTE(review): unlike the sibling agents, this stores the *result* of
    aStarSearch (an action list) in self.searchFunction rather than a
    callable -- confirm downstream code expects a finished plan here.
    """
    self.searchFunction = search.aStarSearch(prob, heur)
    self.searchType = FoodSearchProblem
def __init__(self):
    """Agent that A*-searches CornersProblem instances with cornersHeuristic."""
    def _corners_astar(prob):
        return search.aStarSearch(prob, cornersHeuristic)
    self.searchFunction = _corners_astar
    self.searchType = CornersProblem
def __init__(self):
    """Solve CornersProblem instances with A* guided by cornersHeuristic."""
    self.searchType = CornersProblem
    self.searchFunction = (
        lambda corners_problem: search.aStarSearch(corners_problem,
                                                   cornersHeuristic))
"Usage: PlanningProblem.py domainName problemName heuristicName(max, sum or zero)" ) exit() domain = 'dwrDomain.txt' problem = 'dwrProblem.txt' heuristic = lambda x, y: 0 if len(sys.argv) == 4: domain = str(sys.argv[1]) problem = str(sys.argv[2]) if str(sys.argv[3]) == 'max': heuristic = maxLevel elif str(sys.argv[3]) == 'sum': heuristic = levelSum elif str(sys.argv[3]) == 'zero': heuristic = lambda x, y: 0 else: print( "Usage: PlanningProblem.py domainName problemName heuristicName(max, sum or zero)" ) exit() prob = PlanningProblem(domain, problem) start = time.clock() plan = aStarSearch(prob, heuristic) elapsed = time.clock() - start if plan is not None: print("Plan found with %d actions in %.2f seconds" % (len(plan), elapsed)) else: print("Could not find a plan in %.2f seconds" % elapsed) print("Search nodes expanded: %d" % prob._expanded)
def __init__(self):
    """A* food-search agent using foodHeuristic."""
    def _food_astar(search_problem):
        return search.aStarSearch(search_problem, foodHeuristic)
    self.searchFunction = _food_astar
    self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """FoodSearchProblem heuristic.

    Caches in problem.heuristicInfo: the initial food coordinates,
    run-length encodings of the vertical and horizontal walls, and the
    result of a one-off FoodSubSearchProblem.  Per call, it removes
    already-eaten food from that cached result and prices the remaining
    tour with getCostOfActions.
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    # Initial food coordinates, computed once and cached.
    if 'foodCordinates' not in problem.heuristicInfo:
        foodCordinates = []
        for i in range(foodGrid.width):
            for j in range(foodGrid.height):
                if foodGrid[i][j] is True:
                    foodCordinates.append((i, j))
        problem.heuristicInfo['foodCordinates'] = foodCordinates
    # Run-length encode each column's wall segments, once.
    if 'verticalWalls' not in problem.heuristicInfo:
        verticalWalls = []
        for i in range(problem.walls.width):
            start = -1
            finish = -1
            verticalWalls.append([])
            for j in range(problem.walls.height):
                if problem.walls[i][j] is True:
                    if start == -1:
                        start = j
                    finish += 1
                elif finish > -1:
                    # Wall run ended: record it and reset.
                    verticalWalls[i].append(VerticalWall(
                        start, start + finish))
                    start = -1
                    finish = -1
            # Flush a run that reaches the top edge.
            # NOTE(review): if the column ends with no open run this appends
            # VerticalWall(-1, -2) -- presumably benign because maze borders
            # are walls; confirm.
            verticalWalls[i].append(VerticalWall(start, start + finish))
        problem.heuristicInfo['verticalWalls'] = verticalWalls
    # Same run-length encoding for each row, once.
    if 'horizontalWalls' not in problem.heuristicInfo:
        horizontalWalls = []
        for i in range(problem.walls.height):
            start = -1
            finish = -1
            horizontalWalls.append([])
            for j in range(problem.walls.width):
                if problem.walls[j][i] is True:
                    if start == -1:
                        start = j
                    finish += 1
                elif finish > -1:
                    horizontalWalls[i].append(
                        HorizontalWall(start, start + finish))
                    start = -1
                    finish = -1
            horizontalWalls[i].append(HorizontalWall(start, start + finish))
        problem.heuristicInfo['horizontalWalls'] = horizontalWalls
    # Remove eated food from foodCordinates
    verticalWalls = problem.heuristicInfo['verticalWalls']
    horizontalWalls = problem.heuristicInfo['horizontalWalls']
    foodCordinates = problem.heuristicInfo['foodCordinates'][:]
    eatedFood = []
    for food in foodCordinates:
        if foodGrid[food[0]][food[1]] is False:
            eatedFood.append(food)
    for food in eatedFood:
        foodCordinates.remove(food)
    if foodCordinates is None or len(foodCordinates) == 0:
        return 0
    # Solve the relaxed sub-problem once and cache its food ordering.
    if 'heuristicProblem' not in problem.heuristicInfo:
        foodToRemove = FoodSurrounded.foodToRemove(problem.walls, foodCordinates[:])
        foodToFind = foodCordinates[:]
        for f in foodToRemove:
            if f in foodToFind:
                foodToFind.remove(f)
        heuristicProblem = FoodSubSearchProblem(position, foodToFind, verticalWalls, horizontalWalls)
        heuristicProblem.result = aStarSearch(
            heuristicProblem, lambda node, problem: len(node.leftFood))
        print 'Expanded', heuristicProblem._expanded
        problem.heuristicInfo['heuristicProblem'] = heuristicProblem
    # Price the cached tour minus any food eaten since it was computed.
    heuristicProblem = problem.heuristicInfo['heuristicProblem']
    heuristicProblemResult = heuristicProblem.result[:]
    for eated in eatedFood:
        if eated in heuristicProblemResult:
            heuristicProblemResult.remove(eated)
    result = heuristicProblem.getCostOfActions(position, heuristicProblemResult)
    return result
def manhattanAStar(problem):
    """
    A wrapper for A* that uses the Manhattan distance heuristic.
    """
    # NOTE(review): the heuristic lambda takes a single argument; confirm
    # this project's aStarSearch calls heuristic(state) rather than the
    # common heuristic(state, problem) signature.
    return search.aStarSearch(problem, lambda x: manhattanDistance(x, problem.goal))
def __init__(self):
    """Configure A* over FoodSearchProblem with the food heuristic."""
    self.searchType = FoodSearchProblem
    self.searchFunction = lambda food_problem: search.aStarSearch(
        food_problem, foodHeuristic)
def __init__(self, prob, heur):
    """Precompute the A* solution for prob at construction time.

    NOTE(review): unlike the sibling agents, this stores the *result* of
    aStarSearch (an action list) in self.searchFunction rather than a
    callable -- confirm downstream code expects a finished plan here.
    """
    self.searchFunction = search.aStarSearch(prob, heur)
    self.searchType = FoodSearchProblem