def nodes_at_depth(problem, initial=None, depth=27, verbosity=1):
    """Breadth-first expansion counting the unique states at each level from `initial`.

    Args:
        problem: Search problem object supporting `goal_test`-style use via
            `Node.expand(problem)`; must expose `.goal` (used as the default start).
        initial: Starting state. Defaults to ``problem.goal`` — useful for
            exploring reachability *backwards* from the goal.
        depth: Maximum number of BFS levels to expand.
        verbosity: 0 = silent; 1 = print a banner and the count per level;
            >1 = also dump every frontier state (assumes states are iterables
            of printable items).

    Returns:
        set: the frontier Nodes reached at level `depth` (or an empty set if
        the space was exhausted sooner) — i.e. states first reached at that
        distance from `initial`.
    """
    if initial is None:
        initial = problem.goal
    visited = set()  # hashable states already expanded at shallower depths
    # BUGFIX: was `Node(problem.initial)`, which silently ignored `initial`
    # (and the documented problem.goal default).
    frontier = {Node(initial)}
    d = 0
    while frontier and d < depth:
        if verbosity:
            print('=' * 40 + ' {0:03d}'.format(d) + ' ' + '=' * 40)
        d += 1
        children = set()  # nodes forming the next BFS level
        for node in frontier:
            visited.add(force_hashable(node.state))
            for child in node.expand(problem):
                # Only keep states never seen at this or any shallower level.
                if force_hashable(child.state) not in visited:
                    children.add(child)
        frontier = children
        if verbosity > 1:
            print(','.join(''.join(str(i) for i in node.state) for node in frontier))
        elif verbosity:
            print(len(children))
    return frontier
def best_first_tree_search(problem, f=h_npuzzle_manhattan):
    """Search the nodes with the lowest f scores first (tree search: no explored set).

    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.

    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned.

    Returns:
        The goal Node if found, otherwise None once the frontier is exhausted.
    """
    from itertools import count

    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node):
        return node
    frontier = BuiltinPriorityQueue()
    frontier_set = set()   # mirrors frontier contents for O(1) membership tests
    # BUGFIX: a strictly increasing tiebreaker so equal-f entries never fall
    # through to comparing Node objects (which raises TypeError).
    tiebreak = count()
    frontier.put((f(node), next(tiebreak), node))
    frontier_set.add(node)
    # BUGFIX: queue.PriorityQueue has no __len__/__bool__, so `while frontier:`
    # is always true and .get() would block forever on an empty queue.
    while not frontier.empty():
        node = frontier.get()[-1]
        frontier_set.discard(node)
        if problem.goal_test(node):
            return node
        for child in node.expand(problem):
            if child not in frontier_set:
                frontier.put((f(child), next(tiebreak), child))
                frontier_set.add(child)
    return None
def astar_search(problem, heuristic=h_npuzzle_manhattan):
    """Modified version of Norvig's A* graph search algorithm.

    Allows unhashable states by converting them with `force_hashable` before
    inserting into the explored set.

    Search the nodes with the lowest heuristic scores first. You specify the
    function heuristic(node) that you want to minimize; for example, if
    heuristic is an estimate to the goal, then we have greedy best first
    search; if heuristic is node.depth then we have breadth-first search.

    There is a subtlety: the line "heuristic = memoize(heuristic, 'heuristic')"
    means that heuristic values are cached on the nodes as they are computed,
    so after the search you can examine the heuristic values of the path.

    Returns:
        (goal_node, explored) on success, (None, explored) on failure.
    """
    max_depth = 0
    heuristic = memoize(heuristic, 'heuristic')
    node = Node(problem.initial)
    if problem.goal_test(node):
        # BUGFIX: was `return node` (bare), inconsistent with the (node,
        # explored) tuple returned on every other path.
        return node, set()
    frontier = PriorityQueue(min, heuristic)
    frontier.append(node)
    explored = set()  # hashable states already expanded
    while frontier:
        node = frontier.pop()
        if node.depth > max_depth:
            # Progress report: deepest node expanded so far and frontier size.
            max_depth = node.depth
            print(max_depth, len(frontier))
        if problem.goal_test(node):
            return node, explored
        explored.add(force_hashable(node.state))
        for child in node.expand(problem):
            if force_hashable(child.state) not in explored and child not in frontier:
                frontier.append(child)
            # BUGFIX: this branch previously keyed the frontier by the hashable
            # *state* while the branch above keyed by Node, scored the state
            # with a node-memoized heuristic, and re-appended the state (not
            # the Node) to a frontier of Nodes. Use the child Node throughout,
            # matching Norvig's incumbent-replacement logic.
            elif child in frontier:
                incumbent = frontier[child]
                if heuristic(child) < heuristic(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None, explored