def dijkstra(graph, start):
    """Single-source shortest paths from `start` using Dijkstra's algorithm.

    Args:
        graph: object exposing nodes(), get_successors(node) and
            get_cost(u, v); edge costs are assumed non-negative.
        start: the source node.

    Returns:
        dict mapping (start, node) -> shortest path cost for every node
        reachable from `start`.
    """
    prev = {}            # prev[v]: predecessor of v on the best known path
    costs = {start: 0}   # best known path cost to each discovered node
    visited = set()      # nodes whose final cost is settled
    pq = PriorityQueue()
    # Seed the queue: every node at infinity except the source.
    # (The original compared `node != -1` here -- a leftover hack that left
    # `start` inserted twice, once with inf and once with 0.)
    for node in graph.nodes():
        if node != start:
            pq.insert(float('inf'), node)
    pq.insert(0, start)
    while not pq.is_empty():
        cost, ele = pq.delete_min()
        if cost == float('inf'):
            # Only unreachable nodes remain; stop rather than relaxing
            # edges with infinite cost.
            break
        visited.add(ele)
        for successor in graph.get_successors(ele):
            new_cost = cost + graph.get_cost(ele, successor)
            if successor not in visited and (successor not in costs
                                             or new_cost < costs[successor]):
                costs[successor] = new_cost
                prev[successor] = ele
                pq.update(new_cost, successor)
    # Re-key the result as (start, node) pairs, matching the original API.
    return {(start, key): value for key, value in costs.items()}
def prim(graph, start):
    """Minimum spanning tree of `graph` grown from `start` (Prim's algorithm).

    The priority queue holds the vertices not yet in the tree, keyed by the
    cheapest edge connecting them to the tree so far.

    Args:
        graph: object exposing nodes() and get_successors(node), where
            get_successors yields (successor, edge_cost) pairs.
        start: the vertex to grow the tree from.

    Returns:
        (edges, overall_cost): set of (parent, child) tree edges and the
        total weight of the tree.
    """
    tree_edges = set()
    overall_cost = 0
    prev = {start: start}   # prev[v]: tree-side endpoint of v's cheapest edge
    costs = {start: 0}      # cheapest known connecting-edge weight per vertex
    pq = PriorityQueue()
    visited = set()
    # Seed every vertex at infinity except the source (the original
    # inserted `start` twice: once with inf and again with 0).
    for node in graph.nodes():
        if node != start:
            pq.insert(float('inf'), node)
    pq.insert(0, start)
    while not pq.is_empty():
        cost, ele = pq.delete_min()
        if cost == float('inf'):
            # Remaining vertices are unreachable; without this check a
            # disconnected graph made overall_cost infinite.
            break
        if ele != start:
            # The start vertex has no incoming tree edge; the original
            # added a bogus (start, start) self-loop here.
            tree_edges.add((prev[ele], ele))
        overall_cost += cost
        visited.add(ele)
        for successor, edge_cost in graph.get_successors(ele):
            if successor not in visited and (successor not in costs
                                             or edge_cost < costs[successor]):
                costs[successor] = edge_cost
                prev[successor] = ele
                pq.update(edge_cost, successor)
    return tree_edges, overall_cost
def astar_search(initial_state): """ A* search algorithm for single-player Chexers. Conducts a full A* search to the nearest goal state from `initial_state`. """ # store the current best-known partial path cost to each state we have # encountered: g = {initial_state: 0} # store the previous state in this least-cost path, along with action # taken to reach each state: prev = {initial_state: None} # initialise a priority queue with initial state (f(s) = 0 + h(s)): queue = PriorityQueue() queue.update(initial_state, g[initial_state] + h(initial_state)) # (concurrent iteration is allowed on this priority queue---this will loop # until the queue is empty, and we may modify the queue inside) for state in queue: # if we are expanding a goal state, we can terminate the search! if state.is_goal(): return reconstruct_action_sequence(state, prev) # else, consider all successor states for addition to the queue (if # we see a cheaper path) # for our problem, all paths through state have the same path cost, # so we can just compute it once now: g_new = g[state] + 1 for (action, successor_state) in state.actions_successors(): # if this is the first time we are seeing the state, or if we # have found a new path to the state with lower cost, we must # update the priority queue by inserting/modifying this state with # the appropriate f-cost. # (note: since our heuristic is consistent we should never discover # a better path to a previously expanded state) if successor_state not in g or g[successor_state] > g_new: # a better path! save it: g[successor_state] = g_new prev[successor_state] = (state, action) # and update the priority queue queue.update(successor_state, g_new + h(successor_state)) # if the priority queue ever runs dry, then there must be no path to a goal # state. return None
def dijkstra(graph, start):
    """Single-source shortest paths from `start` (Dijkstra's algorithm).

    Args:
        graph: object exposing nodes() and get_successors(node), where
            get_successors yields (successor, edge_cost) pairs with
            non-negative costs.
        start: the source node.

    Returns:
        (prev, costs): `prev` maps each reached node (other than `start`)
        to its predecessor on a shortest path; `costs` maps each reachable
        node to its shortest path cost from `start`.
    """
    prev = {}
    costs = {start: 0}
    pq = PriorityQueue()
    # Seed every node at infinity except the source (the original inserted
    # `start` twice: once with inf and again with 0).
    for node in graph.nodes():
        if node != start:
            pq.insert(float('inf'), node)
    pq.insert(0, start)
    while not pq.is_empty():
        cost, ele = pq.delete_min()
        if cost == float('inf'):
            # Only unreachable nodes remain; relaxing their edges recorded
            # bogus infinite costs (and garbage prev entries) in the
            # original version.
            break
        for successor, edge_cost in graph.get_successors(ele):
            new_cost = cost + edge_cost
            if successor not in costs or new_cost < costs[successor]:
                costs[successor] = new_cost
                prev[successor] = ele
                pq.update(new_cost, successor)
    return prev, costs