def make_plan(self, state, expansions=5000):
    Node = namedtuple('Node', ('state', 'path', 'reward', 'done'))
    eval_node = self.eval_node
    start = Node(self.env._state, [], 0, False)
    frontier = PriorityQueue(key=eval_node)
    frontier.push(start)
    reward_to_state = defaultdict(lambda: -np.inf)
    best_finished = start

    def expand(node):
        nonlocal best_finished
        s0, p0, r0, _ = node
        for a, s1, r, done in self.model.options(s0):
            node1 = Node(s1, p0 + [a], r0 + r, done)
            if node1.reward <= reward_to_state[s1]:
                continue  # cannot be better than an existing node
            reward_to_state[s1] = node1.reward
            if done:
                best_finished = min((best_finished, node1), key=eval_node)
            else:
                frontier.push(node1)

    for i in range(expansions):
        self.save('frontier', [n[1].state for n in frontier])
        if frontier:
            expand(frontier.pop())
        else:
            break

    if frontier:
        plan = frontier.pop()
    elif best_finished is not None:
        plan = best_finished
    else:
        raise RuntimeError('No plan found.')

    self.save('plan', plan)
    return plan.path
def __construction(self):
    # The RCL list holds the k best candidate edges; the other queue holds the
    # remaining edges that belong neither to the path nor to the RCL.
    RCL = []
    visited = initialize_array(self.cities_count())
    other_edges = PriorityQueue()
    for i in range(0, len(self.coordinates) - 1):
        for j in range(i + 1, len(self.coordinates)):
            x, y = self.coordinates[i], self.coordinates[j]
            distance = euclidean_distance(x, y)
            other_edges.push((x, y), distance)
    for i in range(0, self.RCL_lenght):
        if other_edges.isEmpty():
            break
        RCL.append(other_edges.pop())
    tour = {}
    edges = 0
    while edges != self.cities_count() - 1:
        r = int(round(random.uniform(0, len(RCL) - 1)))
        (city1, city2) = RCL[r]
        if visited[self.mapped_cities[city1]] < 2 and visited[self.mapped_cities[city2]] < 2:
            if city1 not in tour:
                tour[city1] = []
            if city2 not in tour:
                tour[city2] = []
            tour[city1].append(city2)
            tour[city2].append(city1)
            if not self.__DFS(tour):
                edges += 1
                visited[self.mapped_cities[city1]] += 1
                visited[self.mapped_cities[city2]] += 1
            else:
                tour[city1].remove(city2)
                tour[city2].remove(city1)
        # Whether the edge was added to the tour or one of its cities was already
        # fully visited, remove it from the candidate list and pull in a new candidate.
        RCL.remove((city1, city2))
        if not other_edges.isEmpty():
            RCL.append(other_edges.pop())
    result = []
    for i in range(0, len(visited)):
        if visited[i] == 1:
            result = self.sort_tour(tour, current=self.mapped_cities[i])
            break
    return result
def bidirectional_best_first_graph_search(problem, h=None, h_reverse=None):
    h = memoize(h or problem.h, 'h')
    h_reverse = memoize(h_reverse or problem.h_reverse, 'h_reverse')
    node_forward = Node(problem.initial)
    node_backward = Node(problem.goal)
    frontier_forward = PriorityQueue('min', h)
    frontier_backward = PriorityQueue('min', h_reverse)
    frontier_forward.append(node_forward)
    frontier_backward.append(node_backward)
    explored = set()
    while frontier_forward and frontier_backward:
        # Forward step.
        node_forward = frontier_forward.pop()
        if problem.goal_test_forward(node_forward.state):
            print('[f]meet point:')
            print(node_forward.state)
            while True:
                node_backward = frontier_backward.pop()
                if node_backward.state == node_forward.state:
                    break
            return [node_forward, node_backward]
        explored.add(node_forward.state)
        for child in node_forward.expand(problem):
            if child.state not in explored and child not in frontier_forward:
                frontier_forward.append(child)
                problem.backward_goal.append(child.state)
        problem.backward_goal.remove(node_forward.state)

        # Backward step.
        node_backward = frontier_backward.pop()
        if problem.goal_test_backward(node_backward.state):
            print('[b]meet point:')
            print(node_backward.state)
            while True:
                node_forward = frontier_forward.pop()
                if node_backward.state == node_forward.state:
                    break
            return [node_forward, node_backward]
        explored.add(node_backward.state)
        for child in node_backward.expand(problem):
            if child.state not in explored and child not in frontier_backward:
                frontier_backward.append(child)
                problem.forward_goal.append(child.state)
        problem.forward_goal.remove(node_backward.state)
    return None
def depth_limited_best_first_graph_search(problem, f, depth_limit):
    f = memoize(f, 'f')
    node = Node(problem.initial)
    total_nodes = 0
    if problem.goal_test(node.state):
        return node, total_nodes
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        total_nodes += 1
        if problem.goal_test(node.state):
            return node, total_nodes
        explored.add(node.state)
        if node.depth < depth_limit:
            for child in node.expand(problem):
                if child.state not in explored and child not in frontier:
                    frontier.append(child)
                elif child in frontier:
                    incumbent = frontier[child]
                    if f(child) < f(incumbent):
                        del frontier[incumbent]
                        frontier.append(child)
    return None, total_nodes
def least_constraining_value(var, assignment, csp):
    """
    Implements the Least Constraining Value (LCV) heuristic, which orders the
    domain values of `var` so that the values ruling out the fewest choices in
    the remaining unassigned variables come first.
    """
    variables = csp.unassigned_variables()
    variables.remove(var)
    for X in assignment:
        if X in variables:
            variables.remove(X)
    minpq = PriorityQueue()
    for val in csp.domain(var):
        csp.assign_variable(var, val)
        values_ruled_out = 0
        for X in variables:
            for v in csp.domain(X):
                csp.assign_variable(X, v)
                if not csp.check_consistency():
                    values_ruled_out += 1
                csp.assign_variable(X, None)
        csp.assign_variable(var, None)
        minpq.push(val, values_ruled_out)
    domain_ordered = []
    while not minpq.isEmpty():
        domain_ordered.append(minpq.pop())
    return domain_ordered
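# A value ordering like this is typically consumed by a backtracking search.
# The sketch below is one plausible way to plug least_constraining_value into
# such a loop; it assumes the same csp interface used above, and the
# csp.variables() and select_unassigned_variable() helpers are hypothetical
# stand-ins, not part of the original code.
def backtracking_search(csp, assignment=None):
    """Sketch of a backtracking solver that uses LCV for value ordering."""
    assignment = assignment or {}
    if len(assignment) == len(csp.variables()):
        return assignment
    var = select_unassigned_variable(csp, assignment)  # hypothetical helper
    for value in least_constraining_value(var, assignment, csp):
        csp.assign_variable(var, value)
        if csp.check_consistency():
            assignment[var] = value
            result = backtracking_search(csp, assignment)
            if result is not None:
                return result
            del assignment[var]
        csp.assign_variable(var, None)  # undo the tentative assignment
    return None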
def aStarSearch(pos, goal):
    fringe, visited, best = PriorityQueue(), set(), {}
    fringe.push((pos, [], 0), manhattanDistance(pos, goal))
    while not fringe.isEmpty():
        current_point, actions, total_cost = fringe.pop()
        if current_point in visited or \
                (current_point in best and best[current_point] <= total_cost):
            continue
        visited.add(current_point)
        best[current_point] = total_cost
        # The current vertex is a solution.
        if current_point == goal:
            return actions
        for (point, action) in getSuccessors(current_point):
            # If the node has not been visited, add it to the fringe.
            if point not in visited:
                actions_copy = list(actions)
                actions_copy.append(action)
                cost = total_cost + 1
                fringe.push((point, actions_copy, cost),
                            cost + manhattanDistance(point, goal))
    raise Exception('Problem Not Solved')
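# This snippet, like several others in this collection, assumes a priority
# queue exposing push(item, priority), pop() returning just the item, and
# isEmpty(). A minimal heap-based sketch of such a queue, assuming ties may be
# broken by insertion order, in case the original util module is not available:
import heapq


class PriorityQueue:
    """Minimal min-priority queue with the push/pop/isEmpty interface assumed above."""

    def __init__(self):
        self._heap = []
        self._count = 0  # tie-breaker so heapq never compares the items themselves

    def push(self, item, priority):
        heapq.heappush(self._heap, (priority, self._count, item))
        self._count += 1

    def pop(self):
        return heapq.heappop(self._heap)[2]

    def isEmpty(self):
        return len(self._heap) == 0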
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f values
    will be cached on the nodes as they are computed. So after doing a best
    first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
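# As the docstring notes, the choice of f selects the algorithm. A hedged
# sketch of the usual wrappers built on top of best_first_graph_search,
# assuming an AIMA-style Node with a path_cost attribute and a heuristic
# h(node) supplied by the caller:
def greedy_best_first_search(problem, h):
    # Greedy best-first search: order the frontier by the heuristic alone.
    return best_first_graph_search(problem, lambda n: h(n))


def astar_search(problem, h):
    # A* search: order the frontier by path cost so far plus the heuristic.
    return best_first_graph_search(problem, lambda n: n.path_cost + h(n))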
def breadth_first(self, xy1, xy2):
    """Execute a breadth-first search."""
    tile_col1, tile_row1 = self.the_map.xy_to_cr(xy1[0], xy1[1])
    tile_col2, tile_row2 = self.the_map.xy_to_cr(xy2[0], xy2[1])
    successor_to_parent_map = {}
    start_state = (tile_col1, tile_row1)
    # (Successor, Action) -> (Parent, Action)
    successor_to_parent_map[(start_state, None)] = None
    open_list = PriorityQueue()
    open_list.update((start_state, None), 0)
    closed = []
    while not open_list.isEmpty():
        current_state, action_to_current_state = open_list.pop()
        if current_state == (tile_col2, tile_row2):
            return self.__get_action_path(
                (current_state, action_to_current_state),
                successor_to_parent_map)
        if current_state not in closed:
            for successor_state, action, step_cost in self.__get_successors(current_state):
                open_list.update((successor_state, action), 0)
                if successor_state not in closed:
                    successor_to_parent_map[(successor_state, action)] = (
                        current_state, action_to_current_state)
            closed.append(current_state)
    return []
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.

    MODIFICATION: a timeout check has been added.

    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f values
    will be cached on the nodes as they are computed. So after doing a best
    first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()
    start = time.time()
    while frontier and (time.time() - start < TIMEOUT):
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(tuple(sorted(node.state.pieces)))
        for child in node.expand(problem):
            if tuple(sorted(child.state.pieces)) not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
    return None
def best_first_graph_search(problem, f, display=False):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f values
    will be cached on the nodes as they are computed. So after doing a best
    first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            if display:
                print(len(explored), "paths have been expanded and",
                      len(frontier), "paths remain in the frontier")
                print("Total path cost is: " + str(node.path_cost))
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
    return None
def a_star(board, heuristic):
    """
    Main body of the A* algorithm.
    :param board: the puzzle to solve
    :param heuristic: the chosen heuristic function
    :return: the path of the solution found, or None
    """
    frontier = PriorityQueue()
    node = Node(board)
    frontier.add(node, heuristic(node.data) + len(node.path()) - 1)
    explored = []
    while frontier.has_next():
        node = frontier.pop()
        if node.data.is_solved():
            return node.path()
        for move in node.data.legal_moves():
            child = Node(node.data.forecast(move), node)
            if (not frontier.has(child)) and (child.data not in explored):
                frontier.add(child, heuristic(child.data) + len(child.path()) - 1)
            elif frontier.has(child):
                child_value = heuristic(child.data) + len(child.path()) - 1
                if child_value < frontier.get_value(child):
                    frontier.remove(child)
                    frontier.add(child, child_value)
        explored.append(node.data)
    return None
def best_first_search_tree(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f values
    will be cached on the nodes as they are computed. So after doing a best
    first search you can examine the f values of the path returned.

    Tree-search variant: no explored set is kept, so repeated states may be
    expanded more than once."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        for child in node.expand(problem):
            frontier.append(child)
    return None
def all_pairs_dijkstra(self, biGraph, weight='weight'):
    for node in biGraph.nodes():
        g = biGraph.copy()
        attributes = nx.get_edge_attributes(g, 'rname')
        dist = {}
        prev = {}
        last_attribute = {}
        Q = PriorityQueue()
        dist[node] = 0
        prev[node] = [node]
        last_attribute[node] = None
        for n in g.nodes():
            if n != node:
                dist[n] = float('Inf')
                prev[n] = []
            Q.insert(dist[n], n)
        while Q.size() > 0:
            p, u = Q.pop()
            for v in g.neighbors(u):
                p_attribute = last_attribute[u]
                attribute = attributes[(u, v)]
                # Penalize following an 'is-a' edge with an 'is-a2' edge.
                num = 100 if p_attribute == 'is-a' and attribute == 'is-a2' else 0
                alt = dist[u] + g[u][v].get('weight', 1) + num
                if alt < dist[v]:
                    dist[v] = alt
                    prev[v] = prev[u] + [v]
                    last_attribute[v] = attribute
                    Q.insert(dist[v], v)
        yield (node, (dist, prev))
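# A hedged usage sketch for all_pairs_dijkstra: the method expects a NetworkX
# graph whose edges carry 'weight' and 'rname' attributes and yields
# (source, (dist, prev)) pairs. The `searcher` object holding the method is a
# hypothetical stand-in for whatever class defines it. Because the method
# iterates g.neighbors(u) and looks up attributes[(u, v)], a directed graph
# with explicit edges in both directions is the safest input.
import networkx as nx

g = nx.DiGraph()
g.add_edge('a', 'b', weight=1, rname='is-a')
g.add_edge('b', 'a', weight=1, rname='is-a')
g.add_edge('b', 'c', weight=2, rname='is-a2')  # penalized when reached via 'is-a'
g.add_edge('c', 'b', weight=2, rname='is-a2')

for source, (dist, prev) in searcher.all_pairs_dijkstra(g):
    print(source, dist, prev)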
def a_star_search(problem, stats=False):
    h = memoize(problem.h_g, 'h')
    node = Node(problem.initial)
    nodes_generated = 1
    explored = set()
    if problem.goal_test(node.state):
        if stats:
            return (node, explored, nodes_generated)
        return node
    frontier = PriorityQueue('min', h)
    frontier.append(node)
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            if stats:
                return (node, explored, nodes_generated)
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            nodes_generated += 1
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if h(child) < h(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
def greedy_best_first(board, heuristic):
    """
    An implementation of the greedy best-first search algorithm. It uses a
    heuristic function to find the quickest way to the destination.
    :param board: (Board) the board you start at
    :param heuristic: (function) the heuristic function
    :return: (list) path to solution, (int) number of explored boards
    """
    frontier = PriorityQueue()
    node = Node(board)
    frontier.add(node, heuristic(node.data))
    explored = []
    while frontier.has_next():
        node = frontier.pop()
        if node.data.is_solved():
            return node.path(), len(explored) + 1
        for move in node.data.legal_moves():
            child = Node(node.data.forecast(move), node)
            if (not frontier.has(child)) and (child.data not in explored):
                frontier.add(child, heuristic(child.data))
        explored.append(node.data)
    return None, len(explored)
def uc_search(self, initial_state):
    """
    Uniform-Cost Search. Returns the path as a list of directions among
    { Direction.left, Direction.right, Direction.up, Direction.down }.
    """
    # Use a priority queue ordered by minimum path cost.
    from utils import PriorityQueue
    open_list = PriorityQueue()
    open_list.push([(initial_state, None)], 0)
    closed_list = set([initial_state])  # keep already explored positions
    while not open_list.isEmpty():
        # Get the path at the top of the queue.
        current_path, cost = open_list.pop()
        # Get the last place of that path.
        current_state, current_direction = current_path[-1]
        # Check if we have reached the goal.
        if current_state.is_goal_state():
            return list(map(lambda x: x[1], current_path[1:]))
        else:
            # Check where we can go from here.
            next_steps = current_state.get_successor_states()
            # Add the new paths (one step longer) to the queue.
            for state, direction, weight in next_steps:
                # Avoid loops!
                if state not in closed_list:
                    closed_list.add(state)
                    open_list.push(current_path + [(state, direction)], cost + weight)
    return []
def best_first_graph_search(problem, f):
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = list()
    while frontier:
        node = frontier.pop()
        print("Current Node:", node.state)
        if problem.goal_test(node.state):
            trace_path(node)
            return node
        explored.append(node.state)
        print("Explored Nodes:", explored)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
        temp_front = list()
        for e in frontier.heap:
            val, node = e
            temp_front.append(node.state)
        print("Frontier Nodes:", temp_front)
        print("\n")
    return None
def best_first_graph_search(problem, f):
    """Search the successors of the given problem to find a goal. Use an
    evaluation function to decide which neighbor is the most promising and
    explore that one next. If two paths reach the same state, keep the better
    path.
    :param problem: the given problem
    :param f: the evaluation (heuristic) function
    :return: Node or None
    """
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
def optimum_policy2D(grid, init, goal, cost):
    Nr = len(grid)
    Nc = len(grid[0])
    inf = 999
    policy2D = [[' ' for j in range(Nc)] for i in range(Nr)]
    value2D = [[inf for j in range(Nc)] for i in range(Nr)]
    value3D = [[[inf for j in range(Nc)] for i in range(Nr)] for o in range(4)]
    policy3D = [[[' ' for j in range(Nc)] for i in range(Nr)] for o in range(4)]
    visited = []
    frontier = PriorityQueue()
    cumcost = 0
    frontier.push([init, ' ', cumcost], cumcost + heuristic_fun(init[0:2], goal))
    while not frontier.isEmpty():
        loc, move_name, cumcost = frontier.pop()
        if loc not in visited:
            visited.append(loc)
            value3D[loc[2]][loc[0]][loc[1]] = cumcost
            policy3D[loc[2]][loc[0]][loc[1]] = move_name
            if loc[0:2] == goal:
                # Reconstruct the 2D policy and value maps by walking back from the goal.
                policy2D[goal[0]][goal[1]] = '*'
                value2D[goal[0]][goal[1]] = value3D[loc[2]][goal[0]][goal[1]]
                while loc[0:2] != init[0:2]:
                    loc, loc_move = reverse_move(loc, policy3D[loc[2]][loc[0]][loc[1]])
                    policy2D[loc[0]][loc[1]] = loc_move
                    value2D[loc[0]][loc[1]] = value3D[loc[2]][loc[0]][loc[1]]
                print('Value')
                for i in value2D:
                    print(i)
                return policy2D
            for nextloc, move_name, move_cost in childNode(grid, loc, forward, forward_name):
                if nextloc not in visited:
                    nextcumcost = cumcost + move_cost
                    frontier.push([nextloc, move_name, nextcumcost],
                                  nextcumcost + heuristic_fun(nextloc[0:2], goal))
    return 'fail'
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best first
    search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f values
    will be cached on the nodes as they are computed. So after doing a best
    first search you can examine the f values of the path returned."""
    global frontier, node, explored, counter

    if counter == -1:
        f = memoize(f, 'f')
        node = Node(problem.initial)
        display_current(node)
        if problem.goal_test(node.state):
            return node
        frontier = PriorityQueue('min', f)
        frontier.append(node)
        display_frontier(frontier)
        explored = set()
        add_node(node)
        draw_tree()
    if counter % 3 == 0 and counter >= 0:
        node = frontier.pop()
        display_current(node)
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        mark_exploring(node)
        draw_tree()
    if counter % 3 == 1 and counter >= 0:
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                add_node(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
                    remove_node(child)
        display_frontier(frontier)
        draw_tree()
    if counter % 3 == 2 and counter >= 0:
        display_explored(node)
        mark_explored(node)
        draw_tree()
    return None
def compute_path(grid, start, goal, cost, heuristic):
    # Use the OrderedSet for your closed list.
    closed_set = OrderedSet()
    # Use the PriorityQueue for the open list.
    open_set = PriorityQueue(order=min, f=lambda v: v.f)

    # Keep track of the parent of each node. Since the car can take 4 distinct
    # orientations, for each orientation we store a 2D array indicating the grid
    # cells. E.g. parent[0][2][3] denotes the parent when the car is at (2,3) facing up.
    parent = [[[' ' for row in range(len(grid[0]))] for col in range(len(grid))]
              for _ in range(4)]

    # The path of the car.
    path = [['-' for row in range(len(grid[0]))] for col in range(len(grid))]

    x = start[0]
    y = start[1]
    theta = start[2]
    h = heuristic[x][y]
    g = 0
    f = g + h
    open_set.put(start, Value(f=f, g=g))

    # A* main loop. Initially you may want to ignore theta, that is, plan in 2D:
    # set actions=forward, cost=[1, 1, 1, 1], action_name=['U', 'L', 'R', 'D'],
    # and parent=[[' ' for row in range(len(grid[0]))] for col in range(len(grid))].
    while open_set:
        node = open_set.pop()
        closed_set.add(node[0])
        if node[0] == goal:
            break
        # Set the cost values for the neighboring nodes.
        neighbors = get_neighbors(node[0])
        for i in neighbors:
            x = i[0]
            y = i[1]
            theta = i[2]
            h = heuristic[x][y]
            g = node[1].g + costfunction(node[0], i)
            f = g + h
            if i not in open_set and i not in closed_set:
                # Neither open nor closed: enqueue it.
                open_set.put(i, Value(f, g))
            elif i in open_set and f < open_set.get(i).f:
                # Already open but a cheaper path was found: update it.
                open_set.put(i, Value(f, g))
    return path, closed_set
def bcbp_secondpass(groups):
    print(len(groups))
    while len(groups) > 0:
        failures = []
        bal_groups = []
        for group in groups:
            random_group = [random_perm(triad) for triad in group]
            first_node = GroupNode(random_group)
            fringe = PriorityQueue(min, lambda node: node.score)
            fringe.append(first_node)
            if sum([sum(x) for x in first_node.distrib]) // len(first_node.distrib) == 3:
                goal_score = 0
            else:
                goal_score = len(first_node.distrib)
            visited = []
            while True:
                if len(fringe) == 0:
                    print('No solutions')
                    break
                node = fringe.pop()
                visited.append(hash(tuple(node.group)))
                if len(visited) >= 10000:
                    failures.append(node.group)
                    break
                if node.score == goal_score:
                    print(node.group)
                    bal_groups.append(node.group)
                    print()
                    break
                succs = node.successors()
                new_succs = [x for x in succs if hash(tuple(x.group)) not in visited]
                fringe.extend(new_succs)
            gc.collect()
        groups = failures
        print('looping', len(failures))
    return bal_groups
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""

    def frontier_last_states(frontier):
        states = [path[2][-1][0] for path in frontier.heap]
        return states

    def path_cost(path):
        last_state = path[-1][0]
        cost = sum([cost for _, _, cost in path])
        hcost = heuristic(last_state, problem)
        return cost + hcost

    frontier = PriorityQueue()
    start_path = [(problem.initial_state(), 'START', 0.0)]
    frontier.push(start_path, path_cost(start_path))
    explored = set()
    limit = 50000
    while limit > 0:
        if frontier.isEmpty():
            return []
        path = frontier.pop()
        end_step = path[-1]
        end_state = end_step[0]
        explored.add(end_state.immutable())
        if problem.goal_test(end_state):
            return [action for _, action, _ in path[1:]], True, (len(explored), len(frontier))
        successors = problem.successors(end_state)
        for successor in successors:
            last_state = successor[0]
            if (last_state not in frontier_last_states(frontier)
                    and last_state.immutable() not in explored) or problem.goal_test(last_state):
                extended_path = path.copy()
                extended_path.append(successor)
                frontier.push(extended_path, path_cost(extended_path))
        limit -= 1
    return [action for _, action, _ in frontier.pop()[1:]], False, (len(explored), len(frontier))
def aStarSearch(task, heuristic, useHelpfulAction=False, noDel=False):
    extendedPathsCount = 0
    root = SearchNode(task.initial_state, None, None, 0)
    open_set = PriorityQueue()
    open_set.push(root, 0)
    state_cost = {task.initial_state: 0}
    while not open_set.isEmpty():
        pop_node = open_set.pop()
        pop_state = pop_node.state
        # Extend only if the cost of the node is the cheapest found so far.
        # Otherwise ignore, since we've found a better one.
        if state_cost[pop_state] == pop_node.cost:
            extendedPathsCount += 1
            if task.goal_reached(pop_state):
                print("Finished searching. Found a solution.")
                return (extendedPathsCount, pop_node.cost, pop_node.path(), pop_state)
            relaxedPlan = None
            if useHelpfulAction:
                relaxedPlan = heuristic.getRelaxedPlan(SearchNode(pop_state, None, None, 0))
            for op, succ_state in task.get_successor_states(pop_state, noDel):
                # If we're using helpful actions, ignore any op that is not in them.
                if useHelpfulAction:
                    if relaxedPlan and op.name not in relaxedPlan:
                        continue
                # Assume each action (operation) has a cost of one.
                succ_node = SearchNode(succ_state, pop_node, op, 1)
                h = heuristic(succ_node)
                if h == float('inf'):
                    # Don't bother with states that can't reach the goal anyway.
                    continue
                old_succ_cost = state_cost.get(succ_state, float("inf"))
                if succ_node.cost < old_succ_cost:
                    # Found a cheaper state or never saw this state before.
                    open_set.push(succ_node, succ_node.cost + h)
                    state_cost[succ_state] = succ_node.cost
    print("No more operations left. Cannot solve the task")
    # (extendedPathsCount, cost, path, final_state)
    return (extendedPathsCount, None, None, None)
def astarSearch(problem, heuristic='eqn'):
    print(bcolors.HEADER + "\n>> Running A* Search" + bcolors.ENDC)
    startState = problem.getStartState()
    goalState = problem.getGoalState()
    [T, r, Z] = problem.getTransform()
    listOfPredicates = problem.listOfPredicates
    goalCompliantConditions = problem.goalCompliantConditions
    fval = float(heuristic(startState, problem))
    print(bcolors.OKGREEN + "--> Initial heuristic estimate = " + bcolors.OKBLUE + str(fval) + bcolors.ENDC)
    fringe = PriorityQueue()
    closed = []
    numberOfStatesExpanded = 0
    printloop = 0
    fringe.push([startState, [], 0.0], fval)
    while not fringe.isEmpty():
        printloop += 1
        if printloop == 300:
            printloop = 0
            print(bcolors.OKGREEN + "--> Number of states expanded > " + str(numberOfStatesExpanded) + bcolors.ENDC)
        node = fringe.pop()
        if problem.isGoalState(node[0]):
            print(bcolors.OKGREEN + "--> Goal Found. Final number of states expanded = " + bcolors.OKBLUE + str(numberOfStatesExpanded) + bcolors.ENDC)
            return node
        numberOfStatesExpanded += 1
        successor_list = []
        if node[0] not in closed:
            closed.append(node[0])
            successor_list = problem.getSuccessors(node)
        while len(successor_list) > 0:
            put = successor_list.pop()
            if put[0] not in closed:
                hval = heuristic(put[0], problem)
                if hval != -1:
                    newnode = copy.deepcopy(node)
                    newnode[0] = put[0]
                    newnode[1] = newnode[1] + [put[1]]
                    newnode[2] = put[2] + hval
                    fringe.push(newnode, newnode[2])
    print(bcolors.OKGREEN + "--> Search Terminated. Final number of states expanded = " + bcolors.OKBLUE + str(numberOfStatesExpanded) + bcolors.ENDC)
    return None
def main():
    if len(sys.argv) < 2:
        print('Usage: python %s <number of pieces> <group size>' % (sys.argv[0]))
        return
    if len(sys.argv) == 2:
        numpieces = int(sys.argv[1])
        gs = groupsizes(numpieces)
        if len(gs) == 1:
            numgroups = gs[0]
        else:
            print('Usage: python %s %d [ %s ]' % (sys.argv[0], numpieces,
                                                  ' | '.join([str(i) for i in gs])))
            return
    if len(sys.argv) == 3:
        numpieces = int(sys.argv[1])
        numgroups = int(sys.argv[2])
    fringe = PriorityQueue(min, lambda node: node.score)
    node = firstNode(numpieces, numgroups)
    fringe.append(node)
    while True:
        if len(fringe) == 0:
            print('No solutions')
            break
        node = fringe.pop()
        print(node.score)
        if node.score == 0:  # goal state
            break
        fringe.extend(node.successors())
        if len(fringe.A) > 100:
            fringe.A = fringe.A[:100]
        gc.collect()
    print(node.score)
    print(node.groups)
    print(node.distrib)
    bal_groups = bcbp_func.bcbp_secondpass(node.groups)
    print("-------------final result-------------")
    if len(bal_groups) == 0:
        print("failed")
    else:
        bcbp_func.write_stimlist(bal_groups)
        print("-------------SUCCESSFUL !-------------")
def aStarSearch(problem, heuristic=manhattanHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    priority_q = PriorityQueue()
    visited = []
    node = {}
    root = problem.getStartState()
    node["parent"] = None
    node["action"] = None
    node["goal"] = 0
    node["heuristic"] = heuristic(root, problem)
    node["state"] = root
    priority_q.push(node, node["goal"] + node["heuristic"])  # push the root node
    while not priority_q.isEmpty():
        node = priority_q.pop()
        state = node["state"]
        if problem.isGoalState(state):
            break
        if state in visited:
            continue
        visited.append(state)
        children = problem.getSuccessors(state)
        if children:
            for i in range(len(children)):
                if children[i][0] not in visited:
                    sub_node = {}
                    sub_node["parent"] = node
                    sub_node["action"] = children[i][1]
                    sub_node["state"] = children[i][0]
                    sub_node["goal"] = children[i][2] + node["goal"]
                    sub_node["heuristic"] = heuristic(sub_node["state"], problem)
                    priority_q.push(sub_node, sub_node["goal"] + sub_node["heuristic"])
    path = []
    while node["action"] is not None:
        path.insert(0, node["action"])
        node = node["parent"]
    return path
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    fringe = PriorityQueue()
    closed = {}  # Bookkeeping for visited nodes
    fringe.push(Node(problem.initial))
    while fringe:
        node = fringe.pop()  # Choose a node to expand
        if problem.goal_test(node.state):  # Check goal state
            return node
        if node.state not in closed:
            closed[node.state] = True  # Mark the state as visited
            fringe.extend(node.expand(problem))  # Expand the node
    return None
def UCS(self, s):
    pq = PriorityQueue()
    visited = []
    visited.append(s)
    for v, w in self.graph[s]:
        pq.update(item=v, priority=w)
    while not pq.isEmpty():
        (pri, it) = pq.pop()
        visited.append(it)
        for v, w in self.graph[it]:
            if v not in visited:
                pq.update(item=v, priority=w + pri)
    print(visited)
def graph_search(problem, heuristics):
    node = nodeTree(problem.agent, None, None, heuristics[problem.agent])
    frontier = PriorityQueue()
    frontier.insert(node)
    explored = []
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node):
            return soloution(node)
        explored.append(node)
        children = expand_tree(problem, node, heuristics)
        for child in children:
            if not frontier.has_item(child) and child not in explored:
                frontier.insert(child)
    return
class TransactionPool:
    """Transaction pool for a miner."""

    def __init__(self, env, identifier, neighbourList, nodes, params):
        self.env = env
        self.identifier = identifier
        self.neighbourList = neighbourList
        self.params = params
        self.nodes = nodes
        self.transactionQueue = PriorityQueue()
        self.prevTransactions = []

    def getTransaction(self, transactionCount):
        """Return transactionCount transactions, picking the top transactions
        by miner reward."""
        return self.transactionQueue.get(transactionCount)

    def popTransaction(self, transactionCount):
        """Remove transactions from the transaction pool. Called when
        transactions are added by a received block or a block is mined."""
        poppedTransactions = self.transactionQueue.pop(transactionCount)
        self.prevTransactions.append(poppedTransactions)

    def putTransaction(self, transaction, sourceLocation):
        """Add a received transaction to the transaction pool and broadcast it
        further."""
        destLocation = self.nodes[self.identifier].location
        delay = getTransmissionDelay(sourceLocation, destLocation)
        yield self.env.timeout(delay)
        if (not self.transactionQueue.isPresent(transaction)
                and transaction not in self.prevTransactions):
            self.transactionQueue.insert(transaction)
            broadcast(
                self.env,
                transaction,
                "Transaction",
                self.identifier,
                self.neighbourList,
                self.params,
                nodes=self.nodes,
            )
            if self.params["verbose"] == "vv":
                print(
                    "%7.4f : %s accepted by %s"
                    % (self.env.now, transaction.identifier, self.identifier)
                )
class OperationQueue(object):
    # TODO: chunking/batching should probably happen here with the assistance
    # of another queue for prioritized params (i.e., don't create subops so eagerly)
    def __init__(self, qid, op_type, default_limit=ALL):
        self.qid = qid
        options, unwrapped = get_unwrapped_options(op_type)
        self.op_type = op_type
        self.unwrapped_type = unwrapped
        self.options = options
        self.unique_key = options.get('unique_key', 'unique_key')
        self.unique_func = get_unique_func(self.unique_key)
        self.priority = options.get('priority', 0)
        self.priority_func = get_priority_func(self.priority)
        self.default_limit = default_limit
        self.param_set = set()
        self.op_queue = PriorityQueue()
        self._dup_params = []

    def enqueue(self, param, **kw):
        unique_key = self.unique_func(param)
        if unique_key in self.param_set:
            self._dup_params.append(unique_key)
            return
        priority = self.priority_func(param)
        kwargs = {'limit': self.default_limit}
        kwargs.update(kw)
        new_subop = self.op_type(param, **kwargs)
        new_subop._origin_queue = self.qid
        self.op_queue.add(new_subop, priority)
        self.param_set.add(unique_key)

    def enqueue_many(self, param_list, **kw):
        for param in param_list:
            self.enqueue(param, **kw)
        return

    def __len__(self):
        return len(self.op_queue)

    def peek(self, *a, **kw):
        return self.op_queue.peek(*a, **kw)

    def pop(self, *a, **kw):
        return self.op_queue.pop(*a, **kw)
def aStarSearch(self, heuristicName='equality'):
    method = getattr(self, 'heuristic_' + heuristicName)
    print(bcolors.HEADER + "\n>> Running A* Search" + bcolors.ENDC)
    startState = self.getStartState()
    goalState = self.getGoalState()
    [T, r, Z] = self.getTransform()
    listOfPredicates = self.listOfPredicates
    goalCompliantConditions = self.goalCompliantConditions
    fval = float(method(startState))
    print(bcolors.OKGREEN + "--> Initial heuristic estimate = " + bcolors.OKBLUE + str(fval) + bcolors.ENDC)
    fringe = PriorityQueue()
    closed = []
    numberOfStatesExpanded = 0
    printloop = 0
    fringe.push([startState, [], 0.0], fval)
    while not fringe.isEmpty():
        if numberOfStatesExpanded % 100 == 0:
            print(bcolors.OKGREEN + "--> Number of states expanded > " + str(numberOfStatesExpanded) + bcolors.ENDC)
        node = fringe.pop()
        if self.isGoalState(node[0]):
            print(bcolors.OKGREEN + "--> Goal Found. Final number of states expanded = " + bcolors.OKBLUE + str(numberOfStatesExpanded) + bcolors.ENDC)
            return [node[1], node[2]]
        numberOfStatesExpanded += 1
        successor_list = []
        if node[0] not in closed:
            closed.append(node[0])
            successor_list = self.getSuccessors(node)
        while len(successor_list) > 0:
            put = successor_list.pop()
            if put[0] not in closed:
                hval = float(method(put[0]))
                if hval != -1:
                    newnode = [put[0], node[1] + [put[1]], put[2] + hval]
                    fringe.push(newnode, newnode[2])
    print(bcolors.OKGREEN + "--> Search Terminated. Final number of states expanded = " + bcolors.OKBLUE + str(numberOfStatesExpanded) + bcolors.ENDC)
    return None
def iterate_chrono(self):
    unvisited_nodes = PriorityQueue()
    already_seen = set()
    for initial_commit in self.sentinel.out_neighbours():
        unvisited_nodes.push(initial_commit, self.commit_timestamp[initial_commit])
        already_seen.add(initial_commit)
    while True:
        # Iterate over commits in order of commit timestamps.
        try:
            commit_node = unvisited_nodes.pop()
        except IndexError:
            return  # PEP 479: end the generator instead of raising StopIteration
        yield commit_node
        children = commit_node.out_neighbours()
        new_nodes = [child for child in children if child not in already_seen]
        for node in new_nodes:
            unvisited_nodes.push(node, self.commit_timestamp[node])
        already_seen |= set(new_nodes)
class Simulator(SimulatorBase):
    EVENT_QUEUE_THRESHOLD = 100

    def __init__(self):
        self._sim_modules = {}
        self._module_type_map = defaultdict(deque)
        self._device_state = DeviceState()
        self._event_queue = PriorityQueue()
        self._current_time = None
        self._warmup_period = None
        self._event_listeners = defaultdict(deque)
        self._trace_reader = None
        self._trace_executed = False
        self._verbose = False
        self._debug_mode = False
        self._debug_interval = 1
        self._debug_interval_cnt = 0

    def has_module_instance(self, name):
        return name in self._sim_modules

    def get_module_instance(self, name):
        return self._sim_modules[name]

    def get_module_for_type(self, module_type):
        if module_type in self._module_type_map:
            return self._module_type_map[module_type.value][0]
        else:
            return None

    def register(self, sim_module, override=False):
        if not isinstance(sim_module, SimModule):
            raise TypeError("Expected SimModule object")
        if sim_module.get_name() in self._sim_modules:
            raise Exception("Module %s already exists" % sim_module.get_name())
        self._sim_modules[sim_module.get_name()] = sim_module
        if override:
            self._module_type_map[sim_module.get_type().value].appendleft(sim_module)
        else:
            self._module_type_map[sim_module.get_type().value].append(sim_module)

    def build(self, args):
        self._verbose = args.verbose
        self._debug_mode = args.debug
        # Instantiate necessary modules based on config files
        config = configparser.ConfigParser()
        config.read(args.sim_config)
        config['DEFAULT'] = {'modules': '', 'warmup_period': ''}
        if 'Simulator' not in config:
            raise Exception("Simulator section missing from config file")
        sim_settings = config['Simulator']
        # Identify the set of modules to include
        modules_str = sim_settings['modules']
        if modules_str:
            modules_list = modules_str.split(' ')
        else:
            modules_list = []
        self._warmup_period = \
            self.__parse_warmup_setting(sim_settings['warmup_period'])
        # Set up the trace file reader and the initial simulator time
        self._trace_reader = get_trace_reader(args.trace)
        self._trace_reader.build()
        self._trace_executed = False
        self._current_time = self._trace_reader.get_start_time()
        for module_name in modules_list:
            module_settings = {}
            if module_name in config:
                module_settings = config[module_name]
            self.register(get_simulator_module(module_name, self, module_settings))
        # Build all registered modules
        for sim_module in self._sim_modules.values():
            sim_module.build()

    def run(self):
        # Check if we need to enter debug mode immediately
        if self._debug_mode:
            self._debug_interval_cnt = 0
            self.__debug()
        # Add alarm event for the warmup period
        warmup_finish_alarm = SimAlarm(
            timestamp=self._trace_reader.get_start_time() + self._warmup_period,
            handler=self.__enable_stats_collection,
            name='Warmup Period Alarm')
        self._event_queue.push(warmup_finish_alarm,
                               (warmup_finish_alarm.timestamp, Priority.SIMULATOR))
        while not self._trace_reader.end_of_trace() \
                or not self._event_queue.empty():
            # Populate event queue if it is below the threshold number of events
            if self._event_queue.size() < Simulator.EVENT_QUEUE_THRESHOLD \
                    and not self._trace_reader.end_of_trace():
                self.__populate_event_queue_from_trace()
                continue
            # Look at the next event to execute
            cur_event = self._event_queue.peek()
            # Look at the next event from the trace file
            trace_event = self._trace_reader.peek_event()
            # If the trace event is supposed to occur before the current event,
            # then we should populate the event queue with more events from the
            # trace file
            if trace_event and cur_event.timestamp > trace_event.timestamp:
                self.__populate_event_queue_from_trace()
                continue
            self._event_queue.pop()
            # Set the current time of the simulator
            self._current_time = cur_event.timestamp
            if self._verbose:
                print(cur_event)
            if self._debug_mode:
                self._debug_interval_cnt += 1
                if self._debug_interval_cnt == self._debug_interval:
                    self.__debug()
                    self._debug_interval_cnt = 0
            self.__execute_event(cur_event)
        self.__finish()

    def subscribe(self, event_type, handler, event_filter=None):
        if event_type not in self._event_listeners:
            self._event_listeners[event_type] = []
        self._event_listeners[event_type].append((event_filter, handler))

    def broadcast(self, event):
        if event.timestamp:
            if event.timestamp != self._current_time:
                raise Exception("Broadcasting event with invalid timestamp.")
        else:
            event.timestamp = self._current_time
        # Get the set of listeners for the given event type
        listeners = self._event_listeners[event.event_type]
        for (event_filter, handler) in listeners:
            # Send the event to each subscribed listener
            if not event_filter or event_filter(event):
                handler(event)

    def register_alarm(self, alarm):
        self._event_queue.push(alarm, (alarm.timestamp, Priority.ALARM))

    def get_current_time(self):
        return self._current_time

    def get_device_state(self):
        return self._device_state

    def __parse_warmup_setting(self, setting_value):
        if setting_value:
            if setting_value.endswith('h'):
                num_hours = int(setting_value[:-1])
                return datetime.timedelta(hours=num_hours)
            raise Exception("Invalid warmup period setting format")
        else:
            return datetime.timedelta()

    def __enable_stats_collection(self):
        for sim_module in self._sim_modules.values():
            sim_module.enable_stats_collection()

    def __disable_stats_collection(self):
        for sim_module in self._sim_modules.values():
            sim_module.disable_stats_collection()

    def __execute_event(self, event):
        """Private method that handles execution of an event object."""
        if event.event_type == EventType.SIM_DEBUG:
            self.__debug()
        elif event.event_type == EventType.SIM_ALARM:
            if not self._trace_executed:
                event.fire()
                if event.is_repeating():
                    self._event_queue.push(event, (event.timestamp, Priority.ALARM))
        elif event.event_type == EventType.TRACE_END:
            self._trace_executed = True
        else:
            self.broadcast(event)

    def __populate_event_queue_from_trace(self):
        # Fill in the event queue from the trace
        events = self._trace_reader.get_events(count=Simulator.EVENT_QUEUE_THRESHOLD)
        for x in events:
            self._event_queue.push(x, (x.timestamp, Priority.TRACE))

    def __finish(self):
        output_file = sys.stdout
        # Print stats from all modules
        for sim_module in self._sim_modules.values():
            header = "======== %s Stats ========\n" % sim_module.get_name()
            footer = "=" * (len(header) - 1) + '\n'
            output_file.write(header)
            sim_module.print_stats(output_file)
            output_file.write(footer)
        # Call finish for all modules
        for sim_module in self._sim_modules.values():
            sim_module.finish()

    def __debug(self):
        while True:
            command = input("(uamp-sim debug) $ ")
            if command:
                tokens = command.split(sep=' ')
                cmd = tokens[0]
                args = tokens[1:]
                if cmd == 'quit' or cmd == 'exit' or cmd == 'q':
                    # TODO(dmanatunga): Handle simulation quitting better
                    print("Terminating Simulation")
                    exit(1)
                elif cmd == 'interval':
                    if len(args) == 1:
                        try:
                            self._debug_interval = int(args[0])
                        except ValueError:
                            print("Command Usage Error: interval command expects one numerical value")
                    else:
                        print("Command Usage Error: interval command expects one numerical value")
                elif cmd == 'verbose':
                    if len(args) == 0:
                        self._verbose = True
                    elif len(args) == 1:
                        if args[0] == 'on':
                            self._verbose = True
                        elif args[0] == 'off':
                            self._verbose = False
                        else:
                            print("Command Usage Error: verbose command expects 'on' or 'off' for argument")
                    else:
                        print("Command Usage Error: verbose command expects at most one argument")
            else:
                break