def best_first_search_tree(problem, f):
    """Tree-variant best-first search: expand nodes in ascending order of f.

    You choose the function f(node) to minimize: a heuristic estimate gives
    greedy best-first search, node.depth gives breadth-first search.  The
    call memoize(f, 'f') caches each node's f value on the node itself, so
    the f values along the returned path can be inspected afterwards.

    No explored set is kept (tree search), so repeated states may be
    re-expanded.  Returns the goal Node, or None if the frontier empties.
    """
    f = memoize(f, 'f')
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    while frontier:
        current = frontier.pop()
        if problem.goal_test(current.state):
            return current
        for successor in current.expand(problem):
            frontier.append(successor)
    return None
def astar_search(problem):
    """A* search ordered by f(n) = g(n) + h(n), with h the Manhattan-distance
    heuristic supplied by `problem` and unit step costs for g.

    Prints the solution action list on success and returns the goal Node;
    returns None when the frontier is exhausted.

    BUG FIXES vs. the previous version:
    - the frontier held (f, node) tuples, so `child in frontier` compared a
      Node against tuples and never matched, making the re-queue-on-cheaper-
      path branch dead code (and `frontier.remove(child)` would have raised);
      a dict of best known g per frontier state now does that job;
    - `new_child.cost = g` set a nonexistent attribute instead of path_cost;
    - a monotonically increasing tie-break counter keeps heapq from ever
      comparing two Node objects when f values are equal.
    """
    node = Node(problem.initial)
    frontier = []
    tiebreak = 0  # strictly increasing, so heapq never compares Nodes
    heapq.heappush(frontier, (problem.manhattanDist(node) + node.path_cost, tiebreak, node))
    # best known g-value per state still on the frontier (lazy decrease-key)
    best_g = {node.state: node.path_cost}
    explored = set()
    while frontier:
        node = heapq.heappop(frontier)[2]
        if node.state in explored:
            continue  # stale entry superseded by a cheaper path
        if problem.goal_test(node.state):
            print(node.solution())
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            g = node.path_cost + 1  # unit step cost, as in the original
            if child.state not in explored and g < best_g.get(child.state, float('inf')):
                child.path_cost = g
                best_g[child.state] = g
                tiebreak += 1
                heapq.heappush(frontier, (g + problem.manhattanDist(child), tiebreak, child))
    return None
def simulated_annealing_plot(problem, values_for_schedule):
    """[Figure 4.5] Simulated annealing that also scatter-plots the value of
    the current state against the time step before returning.

    `values_for_schedule` supplies the three positional arguments of
    exp_schedule.  CAUTION: this differs from the pseudocode as it returns
    a state instead of a Node.
    """
    schedule = exp_schedule(values_for_schedule[0],
                            values_for_schedule[1],
                            values_for_schedule[2])
    xs = []
    ys = []
    current = Node(problem.initial)
    for step in range(sys.maxsize):
        T = schedule(step)
        if T == 0:
            # Temperature reached zero: show the value-over-time plot and stop.
            plt.scatter(xs, ys)
            plt.show()
            return current.state
        successors = current.expand(problem)
        if not successors:
            # Dead end: nothing to move to, plot and stop.
            plt.scatter(xs, ys)
            plt.show()
            return current.state
        candidate = random.choice(successors)
        delta_e = problem.value(candidate.state) - problem.value(current.state)
        ys.append(problem.value(current.state))
        xs.append(step)
        # Always accept improvements; accept downhill moves with
        # probability exp(delta_e / T).
        if delta_e > 0 or probability(np.exp(delta_e / T)):
            current = candidate
def my_best_first_graph_search(problem, f):
    """Taken from Lab 3.  Search the nodes with the lowest f scores first.

    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal then we have greedy best-first
    search, and if f is node.depth we have breadth-first search.  The line
    "f = memoize(f, 'f')" caches f values on the nodes as they are computed,
    so after the search you can examine the f values of the path returned.

    Returns a 2-tuple (iterations, goal_node), or (iterations, None) on
    failure.

    BUG FIX: the in-loop success path and the failure path previously
    returned a bare iteration count, while the initial goal check returned
    (iterations, node); all exits now return the same (iterations, node)
    shape.
    """
    # Track the number of iterations for use in evaluation.
    iterations = 0
    f = memoize(f, 'f')
    node = Node(problem.initial)
    iterations += 1
    # Initial state may already be the goal.
    if problem.goal_test(node.state):
        iterations += 1
        return (iterations, node)
    # Priority queue ordered by f(n) = g(n) + h(n): distance travelled so
    # far plus the heuristic estimate from the node to the goal.
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    iterations += 1
    explored = set()
    # Loop until there are no more nodes to visit.
    while frontier:
        # Pop the node with minimum f(n).
        node = frontier.pop()
        iterations += 1
        if problem.goal_test(node.state):
            iterations += 1
            return (iterations, node)
        # Mark the node as visited.
        explored.add(node.state)
        for child in node.expand(problem):
            # Only consider new nodes that have not been explored yet and
            # are not already queued for exploration.
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                iterations += 1
            # If the node is already queued, keep the copy with the
            # smaller f(n).
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
                    iterations += 1
        iterations += 1
    return (iterations, None)
def best_first_tree_search(problem, f, display=False):
    """Best-first search, tree-search variant (no explored set).

    Repeatedly pops the frontier node with minimal f; returns it if it
    passes the goal test, otherwise pushes every successor.  Returns None
    when the frontier empties.  `display` is accepted for signature
    compatibility and is not used.
    """
    f = memoize(f, 'f')
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    while frontier:
        current = frontier.pop()
        if problem.goal_test(current.state):
            return current
        for successor in current.expand(problem):
            frontier.append(successor)
    return None
def simulated_annealing(problem, schedule=exp_schedule()):
    """[Figure 4.5] Simulated annealing.  Returns a state, not a Node.

    BUG FIX: delta_e was computed as value(current) - value(next), the
    opposite sign of the book pseudocode and of simulated_annealing_plot in
    this file, which made the acceptance rule prefer *worse* neighbours on
    maximization problems.  It is now value(next) - value(current).
    """
    current = Node(problem.initial)
    for t in range(sys.maxsize):
        T = schedule(t)
        if T == 0:
            # Schedule exhausted: return the state we ended on.
            return current.state
        neighbors = current.expand(problem)
        if not neighbors:
            # Dead end: no successor to move to.
            return current.state
        next_choice = random.choice(neighbors)  # renamed: `next` shadowed the builtin
        delta_e = problem.value(next_choice.state) - problem.value(current.state)
        # Always accept improvements; accept downhill moves with
        # probability exp(delta_e / T).
        if delta_e > 0 or probability(math.exp(delta_e / T)):
            current = next_choice
def breadth_first_graph_search(problem):
    """Breadth-first graph search with goal testing at generation time.

    Prints the solution action sequence when a goal node is generated and
    returns that Node; returns None if the search space is exhausted.
    """
    root = Node(problem.initial)
    if problem.goal_test(root.state):
        return root
    frontier = deque([root])
    explored = set()
    while frontier:
        current = frontier.popleft()
        explored.add(current.state)
        for successor in current.expand(problem):
            if successor.state in explored or successor in frontier:
                continue
            if problem.goal_test(successor.state):
                print(successor.solution())
                return successor
            frontier.append(successor)
    return None
def best_first_greedy_search(problem):
    """Greedy best-first search ordered purely by the Manhattan-distance
    heuristic h(n).

    Prints the solution action list on success and returns the goal Node;
    returns None when the frontier empties.

    BUG FIXES vs. the previous version:
    - the frontier held (h, node) tuples, so `child not in frontier`
      compared a Node against tuples and was always True; a separate set of
      frontier states now performs that membership test;
    - a monotonically increasing tie-break counter keeps heapq from ever
      comparing two Node objects when heuristic values are equal.
    """
    root = Node(problem.initial)
    frontier = []
    tiebreak = 0  # strictly increasing, so heapq never compares Nodes
    heapq.heappush(frontier, (problem.manhattanDist(root), tiebreak, root))
    frontier_states = {root.state}
    explored = set()
    while frontier:
        node = heapq.heappop(frontier)[2]
        frontier_states.discard(node.state)
        if problem.goal_test(node.state):
            print(node.solution())
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child.state not in frontier_states:
                tiebreak += 1
                heapq.heappush(frontier, (problem.manhattanDist(child), tiebreak, child))
                frontier_states.add(child.state)
    return None
def breadth_first_search_for_vis(problem):
    """Breadth-first search instrumented for visualization: records the
    order in which states are reached.

    Returns (goal_node, reached) on success, or (failure, reached) when the
    frontier is exhausted.
    """
    root = Node(problem.initial)
    reached = [root.state]
    if problem.goal_test(root.state):
        return (root, reached)
    frontier = deque([root])
    explored = set()
    while frontier:
        current = frontier.popleft()
        explored.add(current.state)
        reached.append(current.state)
        for successor in current.expand(problem):
            if successor.state in explored or successor in frontier:
                continue
            if problem.goal_test(successor.state):
                return (successor, reached)
            frontier.append(successor)
    return (failure, reached)
def __best_first_graph_search(self, problem, f):
    """Search the nodes with the lowest f scores first.

    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal then we have greedy best-first
    search, and if f is node.depth we have breadth-first search.  The line
    "f = memoize(f, 'f')" caches f values on the nodes as they are computed,
    so after the search you can examine the f values of the path returned.

    Returns (goal_node, nodes_examined), or None on failure.

    BUG FIXES vs. the previous version:
    - the queue was built as PriorityQueue(min, f), passing the *builtin*
      `min` where this file's other searches pass the order flag 'min';
    - the initial goal check returned a bare node while the in-loop success
      path returned a (node, count) tuple; both now return a 2-tuple;
    - identity comparisons with None now use `is not None`.
    """
    f = self.__memoize(f, 'f')
    node = Node(problem.initial)
    assert node is not None and node.state is not None
    if problem.goal_test(node.state):
        # One node examined: the initial state itself.
        return node, 1
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        assert node is not None and node.state is not None, "Popped a None node"
        if problem.goal_test(node.state):
            return node, len(explored) + 1
        explored.add(node.state)
        for child in node.expand(problem):
            assert child is not None and child.state is not None
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                # Keep whichever copy of the state has the smaller f.
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
def breadth_first_count_nodes_at_depth(problem, depth=28):
    """Breadth-first traversal that counts frontier insertions made while the
    node being expanded sits at `depth`.

    Returns that count as soon as a node at depth + 1 is being expanded, or
    None if the search space is exhausted first.  Prints each new depth as
    the traversal reaches it.

    BUG FIX: `print node.depth` was a Python 2 print *statement*, a
    SyntaxError under Python 3 (which the rest of this file targets, e.g.
    its f-strings); it is now a print() call.
    """
    node = Node(problem.initial)
    node_count = 1
    frontier = FIFOQueue()
    explored = set()
    frontier.append(node)
    old_depth = 1
    while frontier:
        node = frontier.pop()
        explored.add(node.state)
        if node.depth != old_depth:
            # Entering a new level: report progress.
            old_depth = node.depth
            print(node.depth)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                if node.depth == depth + 1:
                    # First expansion past the target depth: counting done.
                    return node_count
                if node.depth == depth:
                    node_count += 1
                frontier.append(child)
    return None
def best_first_search_for_vis(problem, f):
    """Best-first graph search instrumented for visualization: records the
    order in which states are popped off the frontier.

    Returns (goal_node, reached) on success, or (failure, reached) when the
    frontier is exhausted.
    """
    f = memoize(f, 'f')
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    explored = set()
    reached = []
    while frontier:
        current = frontier.pop()
        reached.append(current.state)
        if problem.goal_test(current.state):
            return (current, reached)
        explored.add(current.state)
        for successor in current.expand(problem):
            if successor.state not in explored and successor not in frontier:
                frontier.append(successor)
            elif successor in frontier and f(successor) < frontier[successor]:
                # A cheaper path to a queued state: replace the old entry.
                del frontier[successor]
                frontier.append(successor)
    return (failure, reached)
def sarkissian_hw6_2(problem):
    """A*-style search whose priority is straight-line distance to the goal
    plus the path cost accumulated so far.

    On success, prints visit/distance statistics and returns the solution
    path as a list of 2-tuples (lowercased state, heuristic value); returns
    "FAILED" if the frontier empties without reaching the goal.
    """
    start = Node(problem.initial)
    if problem.goal_test(start.state):
        return start

    def heuristic(candidate):
        # f(n) = straight-line distance from n to the goal + cost so far
        return problem.straight_line_distance(candidate.state) + candidate.path_cost

    frontier = NodePriorityQueue(heuristic)
    frontier.push(start)
    visited = []
    while len(frontier) > 0:
        # NodePriorityQueue.pop() yields (heuristic(node), node); keep the node.
        node = frontier.pop()[1]
        visited.append(node.state)
        if problem.goal_test(node.state):
            print(f"Nodes Visited: {len(visited)}")
            print(f"Distance Traveled: {node.path_cost} km")
            return [(n.state.lower(), heuristic(n)) for n in node.path()]
        # Queue any children that have not yet been visited.
        for child in node.expand(problem):
            if child.state not in visited:
                frontier.push(child)
    return "FAILED"
def grid_hill_climbing_search(problem): """From the initial node, keep choosing the neighbor with highest value, stopping when no neighbor is better. [Figure 4.2]""" # useful variables N = len(problem.grid) # y M = len(problem.grid[0]) # x G = nx.grid_2d_graph(N, M) # we use these two variables at the time of visualisations iterations = 0 all_node_colors = [] node_colors = assign_node_initial_colors(G.nodes(), problem.grid, problem) all_node_colors.append(list(node_colors)) # cache h = memoize(problem.h, 'h') node = Node(problem.initial) if problem.goal_test(node.state): #escrever paradaquando resolve node_colors[ node.state[0] * M + node.state[1]] = "orange" # current position being explored iterations += 1 all_node_colors.append(list(node_colors)) node_colors[node.state[0] * M + node.state[1]] = assign_color_by_grid_spot( node.state[0], node.state[1], problem.grid, problem ) # get back to the original color on the next iteration goal_node = node pacman_pos = problem.initial # pacman position after taking action pacman_old = problem.initial # pacman position before taking action problem.activate_pacman( ) # signal the object problem that pacman is now traversing for action in goal_node.solution( ): # set of actions to go from root to goal pacman_old = pacman_pos pacman_pos = problem.result( pacman_pos, action ) # grid automatically changed after problem.result and the action taken node_colors[pacman_pos[0] * M + pacman_pos[1]] = assign_color_by_grid_spot( pacman_pos[0], pacman_pos[1], problem.grid, problem) # current position being explored node_colors[pacman_old[0] * M + pacman_old[1]] = assign_color_by_grid_spot( pacman_old[0], pacman_old[1], problem.grid, problem) iterations += 1 all_node_colors.append(list(node_colors)) problem.deactivate_pacman() return (iterations, all_node_colors, node) while True: node_colors[ node.state[0] * M + node.state[1]] = "orange" # current position being explored iterations += 1 all_node_colors.append(list(node_colors)) 
node_colors[node.state[0] * M + node.state[1]] = assign_color_by_grid_spot( node.state[0], node.state[1], problem.grid, problem ) # get back to the original color on the next iteration neighbors = node.expand(problem) #No options if not neighbors: return (iterations, all_node_colors, node) #Test if any option is the goal for neighbor in neighbors: if problem.goal_test(neighbor.state): #pre node_colors[ neighbor.state[0] * M + neighbor. state[1]] = "orange" # current position being explored iterations += 1 all_node_colors.append(list(node_colors)) node_colors[ neighbor.state[0] * M + neighbor.state[1]] = assign_color_by_grid_spot( neighbor.state[0], neighbor.state[1], problem.grid, problem ) # get back to the original color on the next iteration #pos problem.activate_pacman() pacman_pos = problem.Teste(neighbor.state, node.state) node_colors[neighbor.state[0] * M + neighbor.state[1]] = assign_color_by_grid_spot( neighbor.state[0], neighbor.state[1], problem.grid, problem) # current position being explored node_colors[node.state[0] * M + node.state[1]] = assign_color_by_grid_spot( node.state[0], node.state[1], problem.grid, problem) iterations += 1 all_node_colors.append(list(node_colors)) problem.deactivate_pacman() return (iterations, all_node_colors, neighbor) #pick highest valeu action neighbor = argmin_random_tie(neighbors, key=lambda node: h(node)) if h(neighbor) >= h(node): return (iterations, all_node_colors, neighbor) problem.activate_pacman() problem.Teste(neighbor.state, node.state) node_colors[neighbor.state[0] * M + neighbor.state[1]] = assign_color_by_grid_spot( neighbor.state[0], neighbor.state[1], problem.grid, problem) # current position being explored node_colors[node.state[0] * M + node.state[1]] = assign_color_by_grid_spot( node.state[0], node.state[1], problem.grid, problem) iterations += 1 all_node_colors.append(list(node_colors)) problem.deactivate_pacman() node = neighbor return (iterations, all_node_colors, neighbor)