Code Example #1
 def __init__(self, lst=[]):
     # Split the initial items between a max-heap holding the lower half and
     # a min-heap holding the upper half, so the middle element(s) sit at the
     # tops of the two heaps.
     self.minHeap = PriorityQueue()
     self.maxHeap = MaxHeap()
     self.size = len(lst)
     if lst:
         lst = sorted(lst)
         mid = len(lst) // 2
         self.minHeap = PriorityQueue(lst[mid:])
         self.maxHeap = MaxHeap(lst[0:mid])
Code Example #2
def run_dijkstra(graph, source, target):
    """Dijkstra's shortest path algorithm"""
    queue = PriorityQueue()
    dist = {source: 0}
    prev = {}

    for vertex in graph:
        if vertex != source:
            dist[vertex] = float("inf")
        queue.insert(vertex, dist[vertex])

    while not queue.is_empty():
        u_dist, u = queue.pop()
        u_node = graph[u]

        if u == target:
            break

        for v in u_node['linkTo']:
            if queue.is_node_in_queue(v):
                alt = dist[u] + \
                    calc_distance(int(u_node['x']), int(u_node['y']),
                                  int(graph[v]['x']), int(graph[v]['y']))
                if alt < dist[v]:
                    dist[v] = alt
                    prev[v] = u
                    queue.insert(v, alt)

    path = []
    curr = target
    while curr in prev:
        path.append(curr)
        curr = prev[curr]
    path.append(source)
    return path[::-1]
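The Dijkstra example above relies on a PriorityQueue class that is not shown, with insert(item, priority), pop() returning a (distance, node) pair, is_empty(), and is_node_in_queue(). A minimal sketch of such an indexed min-priority queue, built on Python's heapq with lazy deletion, could look like the following; this is an assumption for illustration, not the original project's implementation.

import heapq
import itertools

class PriorityQueue:
    """Sketch of an indexed min-priority queue (assumed interface, not the original)."""

    def __init__(self):
        self._heap = []                    # (priority, tie-breaker, item) entries
        self._priorities = {}              # item -> latest priority, for membership tests
        self._counter = itertools.count()  # tie-breaker so items are never compared

    def insert(self, item, priority):
        # Re-inserting an item just pushes a fresh entry; the stale one is
        # skipped lazily when it reaches the top of the heap.
        self._priorities[item] = priority
        heapq.heappush(self._heap, (priority, next(self._counter), item))

    def pop(self):
        # Return (priority, item) for the smallest current priority.
        while self._heap:
            priority, _, item = heapq.heappop(self._heap)
            if self._priorities.get(item) == priority:
                del self._priorities[item]
                return priority, item
        raise IndexError('pop from an empty priority queue')

    def is_empty(self):
        return not self._priorities

    def is_node_in_queue(self, item):
        return item in self._priorities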
Code Example #3
File: johnson.py  Project: nkchangliu/algorithms
def dijkstra(graph, start):
    prev = {}
    costs = {}
    costs[start] = 0
    visited = set()

    pq = PriorityQueue()

    # Every node starts with key infinity except the start node, which gets 0.
    for node in graph.nodes():
        if node != start:
            pq.insert(float('inf'), node)
    pq.insert(0, start)

    while not pq.is_empty():
        cost, ele = pq.delete_min()
        visited.add(ele)
        for successor in graph.get_successors(ele):
            new_cost = cost + graph.get_cost(ele, successor)
            if successor not in visited and (successor not in costs or new_cost < costs[successor]):
                costs[successor] = new_cost
                prev[successor] = ele
                pq.update(new_cost, successor)
    res = {}
    for key in costs:
        res[(start, key)] = costs[key]
    return res
Code Example #4
File: graph_algos.py  Project: nkchangliu/algorithms
def prim(graph, start):
    # The heap holds the vertices that have not yet been added to the tree;
    # the key of each vertex is the cheapest edge connecting it to the tree.
    edge = set()
    overall_cost = 0
    prev = {}
    prev[start] = start
    costs = {}
    costs[start] = 0
    pq = PriorityQueue()
    visited = set()

    for node in graph.nodes():
        pq.insert(float('inf'), node)
    pq.insert(0, start)

    while not pq.is_empty():
        cost, ele = pq.delete_min()
        edge.add((prev[ele], ele))
        overall_cost += cost
        visited.add(ele)
        for successor, edge_cost in graph.get_successors(ele):
            new_cost = edge_cost
            if successor not in visited and (successor not in costs or new_cost < costs[successor]):
                costs[successor] = new_cost
                prev[successor] = ele
                pq.update(new_cost, successor)
    return edge, overall_cost
Code Example #5
    def pq_sort(self, test_array):
        pq = PriorityQueue()

        for x in test_array:
            pq.insert(x)

        pq_size = pq.size()
        return [pq.extract_min() for _ in range(pq_size)]
Code Example #6
 def make_queue(self, score_arr):
     q = PriorityQueue()
     score_arr = score_arr.tocoo()
     # itertools.izip is Python 2 only; on Python 3 use the built-in zip().
     for i, j, v in itertools.izip(score_arr.row, score_arr.col,
                                   score_arr.data):
         # Negate the score so the highest-scoring (i, j) pair is returned
         # first by the min-oriented queue.
         inverted_score = -v
         item = (i, j)
         q.put((inverted_score, item))
     return q
Code Example #7
    def test_size_remove(self):
        pq = PriorityQueue()

        for i in range(100):
            pq.insert(i)

        for i in range(100):
            pq.extract_min()

        self.assertEqual(pq.size(), 0)
Code Example #8
def Dijkstra(G, src):
    # initialization
    dist = {}

    Q = PriorityQueue()

    # The source node src is at distance 0 from itself.
    dist[src] = 0

    # Every other node starts at distance infinity; push every node onto the
    # priority queue keyed by its current distance.
    for node in G.keys():
        if node != src:
            dist[node] = float('inf')
        Q.insert(dist[node], node)  # priority is dist[node], value is node

    # Keep looping until the priority queue is empty.
    while Q.size() > 0:
        p, u = Q.pop()  # pop the node with the smallest known distance

        # For each neighbour v of u, compute the distance through u; if it is
        # shorter than v's current distance, record it and push the new
        # distance onto the queue (re-inserting with the smaller distance
        # serves as the update here).
        for v in G[u].keys():
            alt = dist[u] + G[u][v]
            if alt < dist[v]:
                dist[v] = alt
                Q.insert(dist[v], v)

    return dist
Code Example #9
 def __init__(self, initial_state, goal_state, verbose=False):
     self.node_expansions = 0
     self.unique_states = {}
     self.unique_states[initial_state.dictkey()] = True
     self.q = PriorityQueue()
     self.goal_state = goal_state
     self.q.enqueue(InformedNode(initial_state, None, 0, self.goal_state))
     self.verbose = verbose
     solution = self.execute()
     if solution is None:
         print("Search failed")
     else:
         self.showPath(solution)
Code Example #10
def get_huffman_tree(frequency_lst):
    # frequency_lst is an iterable of (char, freq) pairs.
    pq = PriorityQueue()
    for char, freq in frequency_lst:
        pq.insert(freq, TreeNode(char))

    # Repeatedly merge the two least-frequent subtrees until one tree remains.
    while pq.size() > 1:
        freq1, node1 = pq.delete_min()
        freq2, node2 = pq.delete_min()

        internal_node = TreeNode(node1.val + node2.val, node1, node2)
        pq.insert(freq1 + freq2, internal_node)

    _, root = pq.delete_min()
    return get_code(root)
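get_code is referenced but not defined in this example. A plausible helper walks the finished tree and appends '0' for left branches and '1' for right branches; the sketch below is a guess at that helper, and the left/right attribute names are assumptions, since only the TreeNode constructor calls are visible above.

def get_code(root):
    # Hypothetical helper: map each character to its Huffman bit string.
    # Assumes TreeNode(char) leaves have .left and .right set to None and that
    # internal nodes were built as TreeNode(val, left, right).
    codes = {}

    def walk(node, prefix):
        if node is None:
            return
        if node.left is None and node.right is None:
            # Leaf: record the accumulated bit string for this character.
            codes[node.val] = prefix or '0'  # handle a one-symbol alphabet
            return
        walk(node.left, prefix + '0')
        walk(node.right, prefix + '1')

    walk(root, '')
    return codes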
Code Example #11
File: ai.py  Project: Derekmod/TBAI
 def __init__(self,
              turn=0,
              prob_power=1.,
              max_uncertainty=1.,
              training_nodes=None,
              utility_cap=1e-1,
              q=0.5):
     self.turn = turn
     self.prob_power = prob_power
     self.max_uncertainty = max_uncertainty
     self.training_nodes = training_nodes
     if self.training_nodes is None:
         self.training_nodes = PriorityQueue(
             lambda x: x[1].compressed,
             lambda x: x[0])  # TODO: implement max items
     self.utility_cap = utility_cap
     self.q = q
Code Example #12
def astar_search(initial_state):
    """
    A* search algorithm for single-player Chexers. Conducts a full A* search to
    the nearest goal state from `initial_state`.
    """
    # store the current best-known partial path cost to each state we have
    # encountered:
    g = {initial_state: 0}
    # store the previous state in this least-cost path, along with action
    # taken to reach each state:
    prev = {initial_state: None}

    # initialise a priority queue with initial state (f(s) = 0 + h(s)):
    queue = PriorityQueue()
    queue.update(initial_state, g[initial_state] + h(initial_state))

    # (concurrent iteration is allowed on this priority queue---this will loop
    # until the queue is empty, and we may modify the queue inside)
    for state in queue:
        # if we are expanding a goal state, we can terminate the search!
        if state.is_goal():
            return reconstruct_action_sequence(state, prev)

        # else, consider all successor states for addition to the queue (if
        # we see a cheaper path)
        # for our problem, all paths through state have the same path cost,
        # so we can just compute it once now:
        g_new = g[state] + 1
        for (action, successor_state) in state.actions_successors():
            # if this is the first time we are seeing the state, or if we
            # have found a new path to the state with lower cost, we must
            # update the priority queue by inserting/modifying this state with
            # the appropriate f-cost.
            # (note: since our heuristic is consistent we should never discover
            # a better path to a previously expanded state)
            if successor_state not in g or g[successor_state] > g_new:
                # a better path! save it:
                g[successor_state] = g_new
                prev[successor_state] = (state, action)

                # and update the priority queue
                queue.update(successor_state, g_new + h(successor_state))
    # if the priority queue ever runs dry, then there must be no path to a goal
    # state.
    return None
Code Example #13
File: graph_algos.py  Project: nkchangliu/algorithms
def dijkstra(graph, start):
    prev = {}
    costs = {}
    costs[start] = 0

    pq = PriorityQueue()
    for node in graph.nodes():
        pq.insert(float('inf'), node)
    pq.insert(0, start)

    while not pq.is_empty():
        cost, ele = pq.delete_min()
        for successor, edge_cost in graph.get_successors(ele):
            new_cost = cost + edge_cost

            if successor not in costs or new_cost < costs[successor]:
                costs[successor] = new_cost
                prev[successor] = ele
                pq.update(new_cost, successor)
    return prev, costs
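Code Examples #3, #4 and #13 all use a PriorityQueue with insert(priority, item), delete_min(), update(priority, item) and is_empty(), but its implementation is not included in these snippets. One way to sketch such a queue on top of heapq, with lazy deletion standing in for a true decrease-key, is shown below; this is an assumption for illustration, not the nkchangliu/algorithms code.

import heapq

class PriorityQueue:
    """Sketch of a min-priority queue with an update (decrease-key) operation."""

    def __init__(self):
        # (priority, item) entries, possibly stale; ties fall back to comparing
        # items, so items are assumed to be orderable (e.g. strings or ints).
        self._heap = []
        self._current = {}  # item -> latest priority

    def insert(self, priority, item):
        self._current[item] = priority
        heapq.heappush(self._heap, (priority, item))

    def update(self, priority, item):
        # Lower (or set) an item's key by pushing a fresh entry; the stale
        # entry is discarded when it is eventually popped.
        self.insert(priority, item)

    def delete_min(self):
        while self._heap:
            priority, item = heapq.heappop(self._heap)
            if self._current.get(item) == priority:
                del self._current[item]
                return priority, item
        raise IndexError('delete_min from an empty priority queue')

    def is_empty(self):
        return len(self._current) == 0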
Code Example #14
 def test_size_insert(self):
     pq = PriorityQueue()
     for i in range(100):
         self.assertEqual(pq.size(), i)
         pq.insert(i)
Code Example #15
 def __init__(self, lst=[]):
     # Negate each priority so the min-oriented PriorityQueue behaves as a
     # max-heap.
     lst = [(-prio, ele) for (prio, ele) in lst]
     self.maxHeap = PriorityQueue(lst)
Code Example #16
File: ai.py  Project: Derekmod/TBAI
    def getMove(self, state, nslaves=1):
        '''Choose the AI's next move.
        Args:
            state: <State>
        Returns:
            <Move> in the move_list of 'state'
        '''
        hash_fn = lambda node: node.state.compressed  #TODO remove
        #value_fn = lambda node: node._global_log_prob #TODO strengthen
        value_fn = lambda node: node.depth
        pq = PriorityQueue(hash_fn, value_fn)

        player_info = PlayerInfo(turn=state.player_turn,
                                 prob_power=0.1,
                                 max_uncertainty=self._max_uncertainty,
                                 q=self.q_choice)
        root = StateNode(state, player_info)
        pq.add(root)

        redundant = dict()

        slave_procs = []
        slave_pipes = []
        for pidx in range(nslaves):
            slave_pipe, pipe = Pipe()
            proc = Process(target=AIPlayer.evalStates, args=(self, pipe))

            slave_procs += [proc]
            slave_pipes += [slave_pipe]

            proc.start()

        nchecked = 0
        target_slave = 0
        flying_nodes = 0
        while nchecked < self._max_states and (len(pq) + flying_nodes):  # terminal condition
            if len(pq):
                next = pq.pop()
                next_state = next.state
                compressed = next_state.compressed
                if compressed not in redundant:
                    redundant[compressed] = next
                else:
                    original = redundant[compressed]
                    #next = redundant[compressed]
                    #for new_node in next.parent._addChild():
                    for new_node in next.reportRedundant(original):
                        pq.add(new_node)
                    continue

                pipe = slave_pipes[target_slave]
                pipe.send(next_state)
                flying_nodes += 1
                nchecked += 1

            for pipe in slave_pipes:
                if pipe.poll():
                    try:
                        obj = pipe.recv()
                        flying_nodes -= 1
                        if not obj:
                            print('ERROR: slave closed before master [E1]')
                            continue
                        heur_bundle, compressed = obj
                        new_nodes = redundant[compressed].check(heur_bundle)
                        for new_node in new_nodes:
                            pq.add(new_node)
                    except EOFError:
                        print('ERROR: slave closed before master [E2]')

        for pipe in slave_pipes:
            pipe.send(None)

        active_pipes = copy.copy(slave_pipes)
        while active_pipes:
            for pipe in active_pipes:
                try:
                    obj = pipe.recv()
                    if not obj:
                        active_pipes.remove(pipe)
                        continue
                    heur_bundle, compressed = obj
                    redundant[compressed].check(heur_bundle)
                except EOFError:
                    active_pipes.remove(pipe)

        if self.train_iterations:
            X = []
            Y = []
            while len(player_info.training_nodes):
                _, training_state, value, err = player_info.training_nodes.pop()
                #value = node._expected_value
                #err = (node._expected_value - node._self_value) ** 2

                x = training_state.features()
                y = torch.FloatTensor([value, err])

                X += [x]
                Y += [y]

            if self._model:
                self.train(X, Y)

        #cleanNode(root)
        #for child in root.children:
        #    child.recalcValue(verbose=True)

        # find best move
        # PENDING: add randomness
        best_node = None
        moves = []
        uprobs = []
        for node in root.children:
            #print(node.value, node._self_value # TODO re add
            #print(node.state.toString()) # TODO re add
            # Pick the child whose value is best for the player to move:
            # higher values favour player 1, lower values favour player 0.
            if not best_node or (node.value - best_node.value) * (
                    2 * state.player_turn - 1) > 0:
                best_node = node

            moves += [node.move]
            uprobs += [
                get_uprob(get_utility(node.value, state.player_turn),
                          node.uncertainty, player_info.q)
            ]

        if self.train_iterations > 0:
            prob_scale = random.uniform(0, sum(uprobs))
            for i in range(len(uprobs)):
                prob_scale -= uprobs[i]
                if prob_scale <= 0:
                    return moves[i]

        #return state.moves[0] #TEMP
        return best_node.move
Code Example #17
 def test_size_initial(self):
     pq = PriorityQueue()
     self.assertEqual(pq.size(), 0)
Code Example #18
    def test_empty_extract(self):
        pq = PriorityQueue()

        self.assertEqual(pq.extract_min(), None)
Code Example #19
    def test_empty_peek(self):
        pq = PriorityQueue()

        self.assertEqual(pq.find_min(), None)