def uniform_cost_search(problem):
    """Uniform-cost search (Dijkstra on an implicit graph).

    Reference: page 84, Russell & Norvig, "Artificial Intelligence:
    A Modern Approach".

    Returns a tuple ``(path_cost, path)`` on success, or ``None`` when
    the frontier is exhausted without reaching a goal state.
    """
    # WHITE = unseen, GRAY = on the frontier, BLACK = fully expanded.
    color = defaultdict(lambda: WHITE)
    distance = defaultdict(lambda: float('inf'))
    parent = defaultdict(lambda: None)
    color[problem.initial] = GRAY
    distance[problem.initial] = 0
    pq = CustomPriorityQueue()
    pq.add((0, problem.initial))
    while not pq.empty():
        (pathcost, u) = pq.pop()
        # Goal test on *pop* (not on generation) is required for
        # optimality of uniform-cost search.
        if problem.goal_test(u):
            # Reconstruct from u, the state that actually passed the goal
            # test; problem.goal may differ from the reached state, in
            # which case parent[problem.goal] would never have been set.
            return (pathcost, reconstruct_path(parent, u))
        for (child, cost) in problem.children(u):
            if color[child] == WHITE:
                # First time this state is seen: record it and enqueue.
                color[child] = GRAY
                distance[child] = pathcost + cost
                pq.add((distance[child], child))
                parent[child] = u
            elif color[child] == GRAY and distance[child] > pathcost + cost:
                # Cheaper route to a state already on the frontier.
                distance[child] = pathcost + cost
                parent[child] = u
                pq.replace(child, (distance[child], child))
        color[u] = BLACK
    return None  # frontier exhausted: goal unreachable
def uniform_cost_search(problem):
    """Uniform cost search.

    Reference: page 84, Peter Norvig AI book.
    """
    # Per-node bookkeeping: visit colour, best-known cost, predecessor.
    color = defaultdict(lambda: WHITE)
    distance = defaultdict(lambda: float('inf'))
    parent = defaultdict(lambda: None)

    root = problem.initial
    color[root] = GRAY
    distance[root] = 0

    frontier = CustomPriorityQueue()
    frontier.add((0, root))

    while not frontier.empty():
        pathcost, node = frontier.pop()
        successors = problem.children(node)
        if problem.goal_test(node):
            return pathcost, reconstruct_path(parent, problem.goal)
        for successor, step_cost in successors:
            candidate = pathcost + step_cost
            if color[successor] == WHITE:
                # First encounter: record cost/parent and enqueue.
                color[successor] = GRAY
                distance[successor] = candidate
                frontier.add((candidate, successor))
                parent[successor] = node
            elif color[successor] == GRAY and candidate < distance[successor]:
                # Found a cheaper route to a frontier node; re-key it.
                distance[successor] = candidate
                parent[successor] = node
                frontier.replace(successor, (candidate, successor))
        color[node] = BLACK
def a_star(problem):
    """A* search.

    Reference: http://en.wikipedia.org/wiki/A*_search_algorithm

    Returns ``(path_cost, path)`` on success, or ``None`` when no path
    exists. The result is optimal when ``problem.h`` is admissible.
    """
    closedset = set()  # states already expanded
    openset = set()    # states currently on the frontier
    parent = defaultdict(lambda: None)
    g = {}  # cost of the best known path from the start
    f = {}  # g + heuristic estimate to the goal
    openset.add(problem.initial)
    g[problem.initial] = 0
    f[problem.initial] = g[problem.initial] + problem.h(
        problem.initial, problem.goal)
    pq = CustomPriorityQueue()
    pq.add((f[problem.initial], problem.initial))
    while not pq.empty():
        (current_f, current) = pq.pop()
        if problem.goal_test(current):
            # Reconstruct from `current`, the state that actually passed
            # the goal test; problem.goal may not be the state reached.
            return (g[current], reconstruct_path(parent, current))
        openset.remove(current)
        closedset.add(current)
        for (child, cost) in problem.children(current):
            tentative_g_score = g[current] + cost
            if child in closedset and tentative_g_score >= g[child]:
                continue
            if child not in closedset or tentative_g_score < g[child]:
                # No global "g[i] = inf" initialisation is needed: a state
                # missing from g has never been visited, so it is in
                # neither the open nor the closed set and is handled by
                # the membership tests above.
                parent[child] = current
                g[child] = tentative_g_score
                f[child] = g[child] + problem.h(child, problem.goal)
                if child not in openset:
                    openset.add(child)
                    pq.add((f[child], child))
                else:
                    # Keep the queue in sync with the updated f value: the
                    # lowest-f node is taken from the priority queue, not
                    # by scanning the open set, so stale priorities would
                    # break the expansion order.
                    pq.replace(child, (f[child], child))
    return None
def a_star(problem):
    """A* search.

    Reference: http://en.wikipedia.org/wiki/A*_search_algorithm
    """
    explored = Set()          # closed set: fully expanded states
    frontier_members = Set()  # open set: states awaiting expansion
    parent = defaultdict(lambda: None)
    g = {}
    f = {}

    root = problem.initial
    frontier_members.add(root)
    g[root] = 0
    f[root] = g[root] + problem.h(root, problem.goal)

    frontier = CustomPriorityQueue()
    frontier.add((f[root], root))

    while not frontier.empty():
        current_f, current = frontier.pop()
        if problem.goal_test(current):
            return g[current], reconstruct_path(parent, problem.goal)
        frontier_members.remove(current)
        explored.add(current)
        for child, cost in problem.children(current):
            candidate_g = g[current] + cost
            if child in explored and candidate_g >= g[child]:
                continue
            if child not in explored or candidate_g < g[child]:
                # States absent from g were never visited, hence belong to
                # neither set; no up-front g initialisation is required.
                parent[child] = current
                g[child] = candidate_g
                f[child] = candidate_g + problem.h(child, problem.goal)
                if child in frontier_members:
                    # The queue must carry the freshest f value, since the
                    # minimum is popped from it rather than scanned from
                    # the open set.
                    frontier.replace(child, (f[child], child))
                else:
                    frontier_members.add(child)
                    frontier.add((f[child], child))
    return None
def a_star_beam_search(problem, beam_size):
    """A* beam search: A* that accepts at most ``beam_size`` successors
    per expanded node, bounding frontier growth at the cost of
    completeness and optimality.

    Reference: http://en.wikipedia.org/wiki/A*_search_algorithm

    Returns ``(path_cost, path)`` on success, or ``None`` on failure.
    """
    closedset = set()  # states already expanded
    openset = set()    # states currently on the frontier
    parent = defaultdict(lambda: None)
    g = {}  # cost of the best known path from the start
    f = {}  # g + heuristic estimate to the goal
    openset.add(problem.initial)
    g[problem.initial] = 0
    f[problem.initial] = g[problem.initial] + problem.h(
        problem.initial, problem.goal)
    pq = CustomPriorityQueue()
    pq.add((f[problem.initial], problem.initial))
    while not pq.empty():
        (current_f, current) = pq.pop()
        if problem.goal_test(current):
            # Reconstruct from `current`, the state that actually passed
            # the goal test; problem.goal may not be the state reached.
            return (g[current], reconstruct_path(parent, current))
        openset.remove(current)
        closedset.add(current)
        children_count = 0  # successors accepted for this expansion
        for (child, cost) in problem.children(current):
            tentative_g_score = g[current] + cost
            if child in closedset and tentative_g_score >= g[child]:
                continue
            if (child not in closedset or tentative_g_score < g[child]) \
                    and children_count < beam_size:
                parent[child] = current
                g[child] = tentative_g_score
                f[child] = g[child] + problem.h(child, problem.goal)
                children_count += 1
                if child not in openset:
                    openset.add(child)
                    pq.add((f[child], child))
                else:
                    # Keep the queue's priority in sync with the new f.
                    pq.replace(child, (f[child], child))
    return None
def a_star_beam_search(problem, beam_size):
    """A* search with a per-node beam limit.

    Reference: http://en.wikipedia.org/wiki/A*_search_algorithm
    """
    explored = Set()          # closed set: fully expanded states
    frontier_members = Set()  # open set: states awaiting expansion
    parent = defaultdict(lambda: None)
    g = {}
    f = {}

    root = problem.initial
    frontier_members.add(root)
    g[root] = 0
    f[root] = g[root] + problem.h(root, problem.goal)

    frontier = CustomPriorityQueue()
    frontier.add((f[root], root))

    while not frontier.empty():
        current_f, current = frontier.pop()
        if problem.goal_test(current):
            return g[current], reconstruct_path(parent, problem.goal)
        frontier_members.remove(current)
        explored.add(current)
        accepted = 0  # successors kept for this node, capped at beam_size
        for child, cost in problem.children(current):
            candidate_g = g[current] + cost
            if child in explored and candidate_g >= g[child]:
                continue
            improves = child not in explored or candidate_g < g[child]
            if improves and accepted < beam_size:
                parent[child] = current
                g[child] = candidate_g
                f[child] = candidate_g + problem.h(child, problem.goal)
                accepted += 1
                if child in frontier_members:
                    # Re-key the queue entry so the freshest f is used.
                    frontier.replace(child, (f[child], child))
                else:
                    frontier_members.add(child)
                    frontier.add((f[child], child))
    return None
def search(self, start: tuple, goal: tuple, swath_dict: Swath, smooth_path: bool = True):
    """Weighted-A*-style lattice search from `start` toward `goal`.

    Nodes appear to be (x, y, heading-index) tuples — node[2] is compared
    against goal[2] and used in the heading arithmetic below; confirm.

    Returns on success:
        (True, smooth_path, closedSet, x1, y1, x2, y2, orig_path)
    and on failure:
        (False, 'Fail', 'Fail', 'Fail', 'Fail', 'Fail', 'Fail', 'Fail')
    """
    generation = 0  # number of nodes expanded
    openSet = {start: generation}  # point_set of nodes considered for expansion
    closedSet = []
    cameFrom = {start: None}
    cameFrom_by_edge = {start: None}
    # cost from start
    g_score = {start: 0}
    # f score (g score + heuristic) (estimation of cost to goal)
    f_score = {start: self.heuristic(start, goal)}
    # path length between nodes
    path_length = {start: 0}
    # priority queue of all visited node f scores
    f_score_open_sorted = CustomPriorityQueue()
    f_score_open_sorted.put((start, f_score[start]))  # put item in priority queue
    while len(openSet) != 0:
        node = f_score_open_sorted.get()[0]
        # NOTE(review): goal acceptance uses a positional tolerance of 5
        # (units unclear from here — presumably cost-map cells; confirm)
        # and a heading tolerance of 0.01.
        if self.dist(node, goal) < 5 and abs(node[2] - goal[2]) < 0.01:
            # print("goal", goal)
            print("node", node)
            # print("Found path")
            # goal is not exactly the same as node, so when we search for goal (key)
            # in the dictionary, it has to be the same as node
            goal = node
            path = []
            new_path_length = []
            path.append(node)
            new_path_length.append(path_length[node])
            # Walk predecessor links back to the start; path is built
            # goal -> start here and reversed below when smoothing.
            while node != start:
                pred = cameFrom[node]
                node = pred
                path.append(node)
                new_path_length.append(path_length[node])
            orig_path = path.copy()
            if smooth_path:
                path.reverse()  # path: start -> goal
                new_path_length.reverse()
                # print("path", path)
                add_nodes = int(len(path))  # number of nodes to add in the path smoothing algorithm
                # cap at adding 10 nodes to reduce run time
                add_nodes = min(add_nodes, 10)
                # t0 = time.clock()
                # NOTE(review): this assignment shadows the boolean
                # `smooth_path` parameter with the smoothed path result.
                smooth_path, x1, y1, x2, y2 = path_smoothing(
                    path, new_path_length, self.cmap, start, goal,
                    self.ship, add_nodes, self.primitives.num_headings,
                    dist_cuttoff=50)
                # t1 = time.clock() - t0
                # print("smooth time", t1)
            else:
                # No smoothing: return the raw lattice path and collect
                # its x/y coordinates for the caller.
                smooth_path = path
                x1 = []
                y1 = []
                x2 = 0
                y2 = 0
                for vi in path:
                    x1.append(vi[0])
                    y1.append(vi[1])
            print("g_score at goal", g_score[goal])
            return True, smooth_path, closedSet, x1, y1, x2, y2, orig_path
        openSet.pop(node)
        closedSet.append(node)
        # find the base heading (e.g. cardinal or ordinal)
        num_base_h = self.primitives.num_headings // 4
        arr = np.asarray([(node[2] + num_base_h - h[2]) % num_base_h
                          for h in self.primitives.edge_set_dict.keys()])
        base_heading = np.argwhere(arr == 0)[0, 0]
        # get the edge set based on the current node heading
        edge_set = self.primitives.edge_set_dict[(0, 0, base_heading)]
        for e in edge_set:
            neighbour = self.concat(node, e, base_heading, self.primitives.num_headings)
            # print("NEIGHBOUR",neighbour)
            # Keep the whole ship footprint inside the channel bounds
            # (half the max ship length of clearance on every side).
            if neighbour[0] - self.ship.max_ship_length / 2 >= 0 and \
                    neighbour[0] + self.ship.max_ship_length / 2 <= self.chan_w and \
                    neighbour[1] - self.ship.max_ship_length / 2 >= 0 and \
                    neighbour[1] + self.ship.max_ship_length / 2 < self.chan_h:
                # check if point is in closed point_set
                neighbour_in_closed_set, closed_set_neighbour = self.is_point_in_set(
                    neighbour, closedSet)
                if neighbour_in_closed_set:
                    continue
                # If near obstacle, check cost map to find cost of swath
                if self.near_obstacle(node, self.cmap.cost_map.shape,
                                      self.cmap.obstacles,
                                      threshold=self.ship.max_ship_length * 3):
                    swath = self.get_swath(e, node, swath_dict)
                    # NOTE(review): string sentinel for failure; the
                    # type check guards against comparing an array to a
                    # string (isinstance would be the usual idiom).
                    if type(swath) == str and swath == "Fail":
                        continue
                    mask = self.cmap.cost_map[swath]
                    swath_cost = np.sum(mask)
                else:
                    swath_cost = 0
                temp_path_length = self.heuristic(node, neighbour)
                cost = swath_cost + temp_path_length
                temp_g_score = g_score[node] + cost
                # print("cost", cost)
                # check if point is in open set
                neighbour_in_open_set, open_set_neighbour = self.is_point_in_set(
                    neighbour, openSet)
                if not neighbour_in_open_set:
                    # First encounter: record scores and enqueue. f is a
                    # weighted combination, so this is weighted A*.
                    heuristic_value = self.heuristic(neighbour, goal)
                    openSet[neighbour] = generation
                    cameFrom[neighbour] = node
                    cameFrom_by_edge[neighbour] = e
                    path_length[neighbour] = temp_path_length
                    # heading_delta[neighbour] = abs(neighbour[2] - node[2])
                    g_score[neighbour] = temp_g_score
                    f_score[neighbour] = self.g_weight * g_score[neighbour] + \
                        self.h_weight * heuristic_value
                    f_score_open_sorted.put((neighbour, f_score[neighbour]))
                elif temp_g_score < g_score[open_set_neighbour]:
                    # Cheaper route to an open node: rewire its links and
                    # update its scores.
                    open_set_neighbour_heuristic_value = self.heuristic(
                        open_set_neighbour, goal)
                    cameFrom[open_set_neighbour] = node
                    cameFrom_by_edge[open_set_neighbour] = e
                    path_length[open_set_neighbour] = temp_path_length
                    g_score[open_set_neighbour] = temp_g_score
                    new_f_score = self.g_weight * g_score[open_set_neighbour] + \
                        self.h_weight * open_set_neighbour_heuristic_value
                    # NOTE(review): calls the queue's private _update with
                    # the OLD f_score as lookup key before storing the new
                    # one — the ordering of these two lines matters.
                    f_score_open_sorted._update(
                        (open_set_neighbour, f_score[open_set_neighbour]),
                        new_f_score)
                    f_score[open_set_neighbour] = new_f_score
        generation += 1
    print("\nFail")
    return False, 'Fail', 'Fail', 'Fail', 'Fail', 'Fail', 'Fail', 'Fail'