Example No. 1
def dijkstra(graph, start, end):
    inf = float('inf')
    shortest_distances = {start: 0}                 # mapping of nodes to their dist from start
    queue_sd = PQDict(shortest_distances)           # priority queue for tracking min shortest path
    predecessors = {}                               # mapping of nodes to their direct predecessors
    unexplored = set(graph.keys())                  # unexplored nodes
    path = []

    while unexplored and queue_sd:                              # nodes yet to explore on the frontier
        (minNode, minDistance) = queue_sd.popitem()             # node w/ min dist d on frontier
        shortest_distances[minNode] = minDistance               # est dijkstra greedy score
        unexplored.remove(minNode)                              # remove from unexplored
        if minNode == end: break                                # end if goal already reached

        # now consider the edges from minNode with an unexplored head -
        # we may need to update the dist of unexplored successors
        for neighbor in graph[minNode]:                               # successors of minNode
            if neighbor in unexplored:                          # then neighbor is a frontier node
                minDistance = shortest_distances[minNode] + graph[minNode][neighbor]
                if minDistance < queue_sd.get(neighbor, inf):
                    queue_sd[neighbor] = minDistance
                    predecessors[neighbor] = minNode                   # set/update predecessor

    currentNode = end
    while currentNode != start:
        try:
            path.insert(0,currentNode)
            currentNode = predecessors[currentNode]
        except KeyError:
            print('Path not reachable')
            break
    path.insert(0,start)
    if end in shortest_distances:                   # end was reached
        return shortest_distances[end], path
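A minimal usage sketch for the dijkstra() above, assuming the older pqdict API used throughout these examples (from pqdict import PQDict); the graph below is hypothetical illustration data.

from pqdict import PQDict

graph = {'a': {'b': 1, 'c': 4},
         'b': {'c': 2, 'd': 6},
         'c': {'d': 1},
         'd': {}}

distance, path = dijkstra(graph, 'a', 'd')
print(distance)   # 4
print(path)       # ['a', 'b', 'c', 'd']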
    def test_heapsort(self):
        # sequences of operations
        pq = PQDict()
        self.check_heap_invariant(pq)
        self.check_index(pq)

        items = generateData('int')

        # push in a sequence of items
        added_items = []
        for dkey, pkey in items:
            pq.additem(dkey, pkey)
            self.check_heap_invariant(pq)
            self.check_index(pq)
            added_items.append( (dkey,pkey) )

        # pop out all the items
        popped_items = []
        while pq:
            dkey_pkey = pq.popitem()
            self.check_heap_invariant(pq)
            self.check_index(pq)
            popped_items.append(dkey_pkey)

        self.assertTrue(len(pq._heap)==0)
        self.check_index(pq)
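The test methods in this collection rely on a generateData() helper and on custom check_heap_invariant()/check_index() assertions that are not shown here. A plausible minimal generateData(), producing (dkey, pkey) pairs of the requested priority type, might look like this (hypothetical, for context only):

import random

def generateData(pkey_type, num_items=None):
    if num_items is None:
        num_items = random.randrange(1, 50)
    dkeys = ['dkey%d' % i for i in range(num_items)]
    if pkey_type == 'int':
        pkeys = [random.randrange(100) for _ in range(num_items)]
    else:
        pkeys = [random.random() for _ in range(num_items)]
    return list(zip(dkeys, pkeys))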
Example No. 3
    def __init__(self, id, dynamic_model, reactions, species, dynamic_compartments,
                 local_species_population, options=None):
        """ Initialize an NRM submodel object

        Args:
            id (:obj:`str`): unique id of this dynamic NRM submodel
            dynamic_model (:obj:`DynamicModel`): the aggregate state of a simulation
            reactions (:obj:`list` of :obj:`Reaction`): the reactions modeled by this NRM submodel
            species (:obj:`list` of :obj:`Species`): the species that participate in the reactions modeled
                by this NRM submodel, with their initial concentrations
            dynamic_compartments (:obj:`dict`): :obj:`DynamicCompartment`\ s, keyed by id, that contain
                species which participate in reactions that this NRM submodel models, including
                adjacent compartments used by its transfer reactions
            local_species_population (:obj:`LocalSpeciesPopulation`): the store that maintains this
                NRM submodel's species population
            options (:obj:`dict`, optional): NRM submodel options

        Raises:
            :obj:`MultialgorithmError`: if the initial NRM wait exponential moving average is not positive
        """
        super().__init__(id, dynamic_model, reactions, species, dynamic_compartments,
                         local_species_population)
        self.random_state = RandomStateManager.instance()
        self.execution_time_priority_queue = PQDict()
        self.options = options
        # auto_initialize controls whether initialize() is called here, which enables
        # testing of uninitialized instances; it defaults to True
        auto_initialize = True
        if options is not None and 'auto_initialize' in options:
            auto_initialize = options['auto_initialize']
        if auto_initialize:
            self.initialize()
Example No. 4
def djikstra(G, start):
    '''
    Dijkstra's algorithm determines the length from `start` to every other 
    vertex in the graph.

    The graph argument `G` should be a dict indexed by nodes.  The value 
    of each item `G[v]` should also be a dict indexed by predecessor nodes.
    In other words, for any node `v`, `G[v]` is itself a dict, indexed 
    by the predecessors of `v`.  For any directed edge `w -> v`, `G[v][w]` 
    is the length of the edge from `w` to `v`.

    '''
    inf = float('inf')
    dist = {start: 0}       # track shortest path distances from `start`
    E = set([start])        # explored
    U = set(G.keys()) - E   # unexplored

    while U:                                        # unexplored nodes
        D = PQDict()                                # frontier candidates
        for u in U:                                 # unexplored nodes
            for v in G[u]:                          # neighbors of u
                if v in E:                          # then u is a frontier node
                    l = dist[v] + G[u][v]           # start -> v -> u
                    D[u] = min(l, D.get(u, inf))    # choose minimum for u

        (x, d) = D.popitem()                        # node w/ min dist on frontier
        dist[x] = d                                 # assign djikstra greedy score
        U.remove(x)                                 # remove from unexplored
        E.add(x)                                    # add to explored

    return dist                                     # shortest path distances
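A usage sketch for djikstra() above with hypothetical data. Note the reverse-indexed input format described in the docstring: G[v][w] is the length of the edge w -> v, so G maps each node to its predecessors.

from pqdict import PQDict

G = {'a': {},                 # no edges into 'a'
     'b': {'a': 1},           # a -> b with length 1
     'c': {'a': 4, 'b': 2},   # a -> c (4), b -> c (2)
     'd': {'c': 1}}           # c -> d (1)

print(djikstra(G, 'a'))   # {'a': 0, 'b': 1, 'c': 3, 'd': 4}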
Example No. 5
def djikstra(G, start):
    '''
    Dijkstra's algorithm determines the length from `start` to every other 
    vertex in the graph.

    The graph argument `G` should be a dict indexed by nodes.  The value 
    of each item `G[v]` should also be a dict indexed by predecessor nodes.
    In other words, for any node `v`, `G[v]` is itself a dict, indexed 
    by the predecessors of `v`.  For any directed edge `w -> v`, `G[v][w]` 
    is the length of the edge from `w` to `v`.

    '''
    inf = float('inf')
    dist = {start: 0}  # track shortest path distances from `start`
    E = set([start])  # explored
    U = set(G.keys()) - E  # unexplored

    while U:  # unexplored nodes
        D = PQDict()  # frontier candidates
        for u in U:  # unexplored nodes
            for v in G[u]:  # neighbors of u
                if v in E:  # then u is a frontier node
                    l = dist[v] + G[u][v]  # start -> v -> u
                    D[u] = min(l, D.get(u, inf))  # choose minimum for u

        (x, d) = D.popitem()  # node w/ min dist on frontier
        dist[x] = d  # assign djikstra greedy score
        U.remove(x)  # remove from unexplored
        E.add(x)  # add to explored

    return dist  # shortest path distances
Example No. 6
    def get_dijkstra3(self, G, target):
        Gk = G.reverse()
        inf = float('inf')
        D = {target: 0}
        Que = PQDict(D)
        P = {}
        nodes = Gk.nodes
        U = set(nodes)
        while U:
            #print('len U %d'%len(U))
            #print('len Q %d'%len(Que))
            if len(Que) == 0: break
            (v, d) = Que.popitem()
            D[v] = d
            U.remove(v)
            #if v == target: break
            neigh = list(Gk.successors(v))
            for u in neigh:
                if u in U:
                    d = D[v] + Gk[v][u]['weight']
                    if d < Que.get(u, inf):
                        Que[u] = d
                        P[u] = v

        return P
Example No. 7
    def __init__(self, oeos, a_id, a, a_max_fielded, b_id, b, b_max_fielded):
        assert all(isinstance(oeo, Oeo) for oeo in oeos.values()), \
            "oeos is not a dict of oeo_id:oeo"
        assert isinstance(a_id, str), "'a_id' is not a string"
        assert isinstance(a, set), "'a' is not a set of oeo_id"
        assert isinstance(a_max_fielded, int), "'a_max_fielded' is not an int"
        assert isinstance(b_id, str), "'b_id' is not a string"
        assert isinstance(b, set), "'b' is not a set of oeo_id"
        assert isinstance(b_max_fielded, int), "'b_max_fielded' is not an int"
        # Raise an error if any oeo_id is shared between the two teams
        if a & b:
            raise ValueError(f"{a & b} oeo_id(s) not unique between teams")

        self._oeo = oeos
        self._a_id = a_id
        self._a = a
        self._b_id = b_id
        self._b = b

        self._turn_number = 0
        self._field = Field(a_id, a_max_fielded, b_id, b_max_fielded)
        self._pending_sim_events = PQDict()
        self._processed_sim_events = []

        move_set = set()
        for o in itertools.chain(self._oeo.values()):
            for move in o.moves:
                move_set.add(move)
        moves = Move.load_moves(move_set)
        self._moves = moves

        # self._speed_priority_list = None
        self._setup_axel_events()
Example No. 8
def dijkstra(graph, start):
    p_queue = PQDict()
    dist_to_start = {}
    prev_vert = {}

    # Fill the dictionaries with initial values
    for vert in graph:
        dist_to_start[vert] = float('inf')  # unknown distances start at infinity
        prev_vert[vert] = None

    # Initialize the priority queue dictionary
    dist_to_start[start] = 0
    for vert in graph:
        p_queue[vert] = dist_to_start[vert]

    # Dijkstra's main loop over the priority queue dictionary
    while len(p_queue) > 0:
        current = p_queue.popitem()[0]  # the first item in the tuple is the key
        neighbors = graph[current].keys()

        for vert in neighbors:
            neighbor_dist = graph[current][vert] 
            new_distance = dist_to_start[current] + neighbor_dist
            
            if new_distance < dist_to_start[vert]: # if smaller than the last dist_to_start replace
                p_queue[vert] = new_distance
                dist_to_start[vert] = new_distance
                prev_vert[vert] = current

    return prev_vert
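A usage sketch (hypothetical graph): the dijkstra() above returns the predecessor map, from which a shortest path can be rebuilt by walking backwards from the target.

graph = {'a': {'b': 2, 'c': 5},
         'b': {'c': 1, 'd': 4},
         'c': {'d': 1},
         'd': {}}
prev = dijkstra(graph, 'a')

node, path = 'd', []
while node is not None:
    path.insert(0, node)
    node = prev[node]
print(path)   # ['a', 'b', 'c', 'd']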
Example No. 9
def gillespie_nrm(tspan, c_0, c_poly, reactions, dep_graph):
    t = tspan[0]
    c = c_0.copy()
    T = [t]
    C = [c.copy()]

    volume = compute_volume(c_0, c_poly)

    # initialize scheduler
    scheduler = PQDict()

    for rxn in reactions:
        tau = -log(rand()) / rxn.propensity(c, volume)
        scheduler[rxn] = t + tau

    # first event
    rnext, tnext = scheduler.topitem()
    t = tnext
    C, T = fire_rxn(rnext, c, t, c_poly, C, T)

    while t < tspan[1]:
        # reschedule dependent reactions
        for rxn in dep_graph[rnext]:
            tau = -log(rand()) / rxn.propensity(c, volume)
            scheduler[rxn] = t + tau

        # fire the next one!
        rnext, tnext = scheduler.topitem()
        t = tnext
        if (rnext.propensity(c, volume) > 0):
            C, T = fire_rxn(rnext, c, t, c_poly, C, T)
        else:
            print(c_poly)
    return array(T), array(C)
    def test_swap_priority(self):
        pq = PQDict(A=5, B=8, C=1)
        pq.swap_priority('A', 'C')
        self.check_index(pq)
        self.assertEqual(pq['A'], 1)
        self.assertEqual(pq['C'], 5)
        self.assertEqual(pq.top(), 'A')
        self.assertRaises(KeyError, pq.swap_priority, 'A', 'Z')
    def test_topitem(self):
        # empty
        pq = PQDict()
        self.assertRaises(KeyError, pq.top)
        # non-empty
        for num_items in range(1, 30):
            items = generateData('float', num_items)
            pq = PQDict(items)
            self.assertEqual(pq.topitem(), min(items, key=lambda x: x[1]))
Example No. 12
    def a_star(self, heuristic):
        node = self.tree.create_node(state=State(self.wrigglers), pathCost=0)
        node.heuristic = heuristic(node)

        frontier = PQDict()
        stateFrontier = {}
        explored = {}

        # Sacrifice memory for a big speed-up: this dict lets us instantly check whether a state is already in the frontier
        stateFrontier[str(node.state)] = node.heuristic
        frontier.additem(node._identifier, node.heuristic)

        while(True):
            if(len(frontier) == 0):
                return None

            nodeID = frontier.popitem()[0]
            node = self.tree.get_node(nodeID)
            nodeStateStr = str(node.state)

            del stateFrontier[nodeStateStr]

            if self.testGoal(node.state):
                return node

            explored[nodeStateStr] = -1  # we don't care what the hash matches
            actions = self.getActions(node.state)
            for action in actions:
                child = self.childNode(node, action)
                child.heuristic = heuristic(child)
                childStr = str(child.state)

                inExplored = False
                inFrontier = False

                if childStr in explored:
                    inExplored = True

                bGreater = False
                if childStr in stateFrontier:
                    if(stateFrontier[childStr] < child.heuristic + child.pathCost):
                        bGreater = True
                    inFrontier = True

                if(not inExplored and not inFrontier):
                    stateFrontier[childStr] = child.heuristic
                    frontier.additem(child._identifier, child.heuristic + child.pathCost)
                elif(bGreater):
                    bHappened = False
                    for key in frontier:
                        if(str(self.tree.get_node(key).state) == childStr):
                            bHappened = True
                            frontier.pop(key)
                            frontier.additem(child._identifier, child.heuristic + child.pathCost)
                            break
                    assert bHappened
    def test_copy(self):
        pq1 = PQDict(self.items)
        pq2 = pq1.copy()
        # equality by value
        self.assertEqual(pq1, pq2)

        dkey = random.choice(self.dkeys)
        pq2[dkey] += 1  
        self.assertNotEqual(pq1[dkey], pq2[dkey])
        self.assertNotEqual(pq1, pq2)
    def test_replace_key(self):
        pq = PQDict(A=5, B=8, C=1)
        pq.replace_key('A', 'Alice')
        pq.replace_key('B', 'Bob')
        self.check_index(pq)
        self.assertEqual(pq['Alice'], 5)
        self.assertEqual(pq['Bob'], 8)
        self.assertRaises(KeyError, pq.__getitem__, 'A')
        self.assertRaises(KeyError, pq.__getitem__, 'B')
        self.assertRaises(KeyError, pq.replace_key, 'C', 'Bob')
Example No. 15
def primMST(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()
    Gprime = nx.Graph()
    ''' Add all nodes to PQDict with infinite distance'''
    for node in G.nodes():
        pq.additem(node, float("inf"))

    curr = pq.pop()  #Select initial node
    vis.add(curr)
    while len(pq) > 0:
        for s, nod, wt in G.edges(curr, data=True):
            if nod not in vis and wt['weight'] < pq[nod]:
                pq.updateitem(nod, wt['weight'])

        if len(pq) > 0:
            top = pq.top()
            source, destination, dist = [
                data for data in sorted(G.edges(top, data=True),
                                        key=lambda edge: edge[2]['weight'])
                if data[1] in vis
            ][0]
            Gprime.add_edge(source, destination, weight=dist['weight'])
            vis.add(top)
            tot_weight += pq[top]
            curr = pq.pop()

    return Gprime, tot_weight
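A usage sketch for primMST() above, assuming networkx is imported as nx and PQDict is in scope; the weighted graph is hypothetical illustration data.

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([('a', 'b', 1), ('b', 'c', 2),
                           ('a', 'c', 4), ('c', 'd', 3)])
mst, weight = primMST(G)
print(weight)                  # 6
print(mst.number_of_edges())   # 3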
Example No. 16
def dijkstra(G, start, end=None):
    '''
    Dijkstra's algorithm determines the length from `start` to every other 
    vertex in the graph.

    The graph argument `G` should be a dict indexed by nodes.  The value 
    of each item `G[v]` should also be a dict indexed by successor nodes.
    In other words, for any node `v`, `G[v]` is itself a dict, indexed 
    by the successors of `v`.  For any directed edge `v -> w`, `G[v][w]` 
    is the length of the edge from `v` to `w`.

        graph = {'a': {'b': 1}, 
                 'b': {'c': 2, 'b': 5}, 
                 'c': {'d': 1},
                 'd': {}}

    Returns two dicts, `dist` and `pred`:

        dist, pred = dijkstra(graph, start='a') 
    
    `dist` is a dict mapping each node to its shortest distance from the
    specified starting node:

        assert dist == {'a': 0, 'c': 3, 'b': 1, 'd': 4}

    `pred` is a dict mapping each node to its predecessor node on the
    shortest path from the specified starting node:

        assert pred == {'b': 'a', 'c': 'b', 'd': 'c'}
    
    '''
    inf = float('inf')
    D = {start: 0}          # mapping of nodes to their dist from start
    Q = PQDict(D)           # priority queue for tracking min shortest path
    P = {}                  # mapping of nodes to their direct predecessors
    U = set(G.keys())       # unexplored nodes

    while U:                                    # nodes yet to explore
        (v, d) = Q.popitem()                    # node w/ min dist d on frontier
        D[v] = d                                # est dijkstra greedy score
        U.remove(v)                             # remove from unexplored
        if v == end: break                      # stop early once the goal is reached

        # now consider the edges from v with an unexplored head -
        # we may need to update the dist of unexplored successors
        for w in G[v]:                          # successors to v
            if w in U:                          # then w is a frontier node
                d = D[v] + G[v][w]              # dgs: dist of start -> v -> w
                if d < Q.get(w, inf):
                    Q[w] = d                    # set/update dgs
                    P[w] = v                    # set/update predecessor

    return D, P
Example No. 17
def greedy_approx(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()
    path = []
    '''Initialize the priority queue, which helps find the farthest node after distances are calculated from visited nodes'''
    for node in G.nodes():
        pq.additem(node, float("-inf"))

    curr = pq.pop()
    vis.add(curr)
    path.append(curr)
    while len(pq) > 0:
        for s, nod, wt in G.edges(curr, data=True):
            '''Distance calculation'''
            if nod not in vis and -wt['weight'] > pq[nod]:
                pq.updateitem(nod, -wt['weight'])

        if len(pq) > 0:
            ''' Selection Step'''
            top = pq.top()
            vis.add(top)
            curr = pq.pop()
            ''' Insertion Step'''
            loc, cost = minCost(G, path, top)
            '''Insert into the location found by minCost()'''
            path.insert(loc, top)
            tot_weight += cost

    return path, tot_weight
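greedy_approx() above relies on a minCost() helper that is not shown. A plausible sketch (hypothetical, using the standard cheapest-insertion cost) returning the insertion position and its cost for `node` in the current tour `path`:

def minCost(G, path, node):
    # default: append after the last node of the partial tour
    best_loc, best_cost = len(path), G[path[-1]][node]['weight']
    for i in range(1, len(path)):
        u, v = path[i - 1], path[i]
        # cost of inserting `node` between consecutive tour nodes u and v
        cost = (G[u][node]['weight'] + G[node][v]['weight']
                - G[u][v]['weight'])
        if cost < best_cost:
            best_loc, best_cost = i, cost
    return best_loc, best_cost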
    def test_updateitem(self):
        pq = PQDict(self.items)
        dkey, pkey = random.choice(self.items)
        # assign same value
        pq.updateitem(dkey, pkey)
        self.assertEqual(pq[dkey], pkey)
        # assign new value
        pq.updateitem(dkey, pkey + 1.0)
        self.assertEqual(pq[dkey], pkey + 1.0)
        # can only update existing dkeys
        self.assertRaises(KeyError, pq.updateitem, 'does_not_exist', 99.0)
    def test_update(self):
        pq1 = PQDict(self.items)
        pq2 = PQDict()
        pq2['C'] = 3000
        pq2['D'] = 4000
        pq2['XYZ'] = 9000
        pq1.update(pq2)
        self.assertEqual(pq1['C'], 3000)
        self.assertEqual(pq1['D'], 4000)
        self.assertIn('XYZ', pq1)
        self.assertEqual(pq1['XYZ'], 9000)
def gillespie_nrm(tspan, initial_amounts, reactions, dep_graph):
    """
    Implementation of the "Next-Reaction Method" variant of the Gillespie 
    stochastic simulation algorithm, described by Gibson and Bruck. 

    The main enhancements are:
        - Use of dependency graph connecting the reaction channels to prevent 
          needless rescheduling of reactions that are unaffected by an event.
        - Use of an indexed priority queue (pqdict) as a scheduler to achieve
          O(log(M)) rescheduling on each iteration, where M is the number of
          reactions, assuming the dependency graph is sparse.

    The paper describes an additional modification to cut down on the amount of 
    random number generation which was not implemented here for simplicity.

    """
    # initialize state
    t = tspan[0]
    x = initial_amounts
    T = [t]
    X = [x]
    
    # initialize scheduler
    scheduler = PQDict()
    for rxn in reactions:
        tau = -log(rand())/rxn.propensity(x) 
        scheduler[rxn] = t + tau

    # first event
    rnext, tnext = scheduler.topitem()
    t = tnext
    x += rnext.stoich
    T.append( t )
    X.append( x.copy() )

    # main loop
    while t < tspan[1]:
        # reschedule
        tau = -log(rand())/rnext.propensity(x)
        scheduler[rnext] = t + tau

        # reschedule dependent reactions
        for rxn in dep_graph[rnext]:
            tau = -log(rand())/rxn.propensity(x)
            scheduler[rxn] = t + tau
        
        # fire the next one!
        rnext, tnext = scheduler.topitem()
        t = tnext
        x += rnext.stoich
        T.append( t )
        X.append( x.copy() )

    return array(T), array(X)
def gillespie_nrm(tspan, initial_amounts, reactions, dep_graph):
    """
    Implementation of the "Next-Reaction Method" variant of the Gillespie 
    stochastic simulation algorithm, described by Gibson and Bruck. 

    The main enhancements are:
        - Use of dependency graph connecting the reaction channels to prevent 
          needless rescheduling of reactions that are unaffected by an event.
        - Use of an indexed priority queue (pqdict) as a scheduler to achieve
          O(log(M)) rescheduling on each iteration, where M is the number of
          reactions, assuming the dependency graph is sparse.

    The paper describes an additional modification to cut down on the amount of 
    random number generation which was not implemented here for simplicity.

    """
    # initialize state
    t = tspan[0]
    x = initial_amounts
    T = [t]
    X = [x]

    # initialize scheduler
    scheduler = PQDict()
    for rxn in reactions:
        tau = -log(rand()) / rxn.propensity(x)
        scheduler[rxn] = t + tau

    # first event
    rnext, tnext = scheduler.topitem()
    t = tnext
    x += rnext.stoich
    T.append(t)
    X.append(x.copy())

    # main loop
    while t < tspan[1]:
        # reschedule
        tau = -log(rand()) / rnext.propensity(x)
        scheduler[rnext] = t + tau

        # reschedule dependent reactions
        for rxn in dep_graph[rnext]:
            tau = -log(rand()) / rxn.propensity(x)
            scheduler[rxn] = t + tau

        # fire the next one!
        rnext, tnext = scheduler.topitem()
        t = tnext
        x += rnext.stoich
        T.append(t)
        X.append(x.copy())

    return array(T), array(X)
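A minimal, self-contained driver for the gillespie_nrm() above, modelling a reversible isomerization A <-> B. The Reaction class, rate constants and dependency graph below are hypothetical scaffolding: the snippet only assumes objects with a .propensity(x) method and a .stoich state-change vector, plus log, rand, array and PQDict in scope.

from math import log
import numpy as np
from numpy import array
from numpy.random import rand
from pqdict import PQDict

class Reaction:
    """Hypothetical reaction object with the interface gillespie_nrm() expects."""
    def __init__(self, name, rate, reactant, stoich):
        self.name = name
        self.rate = rate
        self.reactant = reactant        # index of the consumed species
        self.stoich = np.array(stoich)  # state-change vector
    def propensity(self, x):
        return self.rate * x[self.reactant]
    def __repr__(self):
        return self.name

fwd = Reaction('A->B', 1.0, 0, [-1, +1])
back = Reaction('B->A', 0.5, 1, [+1, -1])
# firing either reaction changes both propensities
dep_graph = {fwd: [back], back: [fwd]}

T, X = gillespie_nrm((0.0, 10.0), np.array([70.0, 30.0]), [fwd, back], dep_graph)
print(len(T), X[-1])   # number of events and the final copy numbers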
Example No. 22
    def __init__(self, args, ttl=604800, time=time):
        self.args = args
        self.time = time

        # linked
        self.popularity_queue = PQDict()
        self.age_dict = OrderedDict()

        # separate
        self.future_popularity_queue = PQDict()

        self.step = ttl
Example No. 23
def dijkstra(G, start, end=None):
    '''
    Dijkstra's algorithm determines the length from `start` to every other 
    vertex in the graph.

    The graph argument `G` should be a dict indexed by nodes.  The value 
    of each item `G[v]` should also be a dict indexed by successor nodes.
    In other words, for any node `v`, `G[v]` is itself a dict, indexed 
    by the successors of `v`.  For any directed edge `v -> w`, `G[v][w]` 
    is the length of the edge from `v` to `w`.

        graph = {'a': {'b': 1}, 
                 'b': {'c': 2, 'b': 5}, 
                 'c': {'d': 1},
                 'd': {}}

    Returns two dicts, `dist` and `pred`:

        dist, pred = dijkstra(graph, start='a') 
    
    `dist` is a dict mapping each node to its shortest distance from the
    specified starting node:

        assert dist == {'a': 0, 'c': 3, 'b': 1, 'd': 4}

    `pred` is a dict mapping each node to its predecessor node on the
    shortest path from the specified starting node:

        assert pred == {'b': 'a', 'c': 'b', 'd': 'c'}
    
    '''
    inf = float('inf')
    D = {start: 0}          # mapping of nodes to their dist from start
    Q = PQDict(D)           # priority queue for tracking min shortest path
    P = {}                  # mapping of nodes to their direct predecessors
    U = set(G.keys())       # unexplored nodes

    while U:                                    # nodes yet to explore
        (v, d) = Q.popitem()                    # node w/ min dist d on frontier
        D[v] = d                                # est dijkstra greedy score
        U.remove(v)                             # remove from unexplored
        if v == end: break

        # now consider the edges from v with an unexplored head -
        # we may need to update the dist of unexplored successors 
        for w in G[v]:                          # successors to v
            if w in U:                          # then w is a frontier node
                d = D[v] + G[v][w]              # dgs: dist of start -> v -> w
                if d < Q.get(w, inf):
                    Q[w] = d                    # set/update dgs
                    P[w] = v                    # set/update predecessor

    return D, P
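A small helper sketch built on the dijkstra() above: walk the `pred` map backwards to recover the actual shortest path between two nodes (hypothetical wrapper, using the docstring's example graph).

def shortest_path(G, start, end):
    dist, pred = dijkstra(G, start, end)
    path = [end]
    while path[-1] != start:
        path.append(pred[path[-1]])   # raises KeyError if `end` is unreachable
    path.reverse()
    return path

graph = {'a': {'b': 1},
         'b': {'c': 2, 'b': 5},
         'c': {'d': 1},
         'd': {}}
print(shortest_path(graph, 'a', 'd'))   # ['a', 'b', 'c', 'd']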
Example No. 24
def greedy_approx(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()            
    path = []
    
    '''Initialize the priority queue, which helps find the farthest node after distances are calculated from visited nodes''' 
    for node in G.nodes():
        pq.additem(node, float("-inf"))
    
    curr = pq.pop()
    vis.add(curr)
    path.append(curr)
    while len(pq) > 0:
        for s,nod, wt in G.edges(curr, data=True):
            '''Distance calculation'''
            if nod not in vis and -wt['weight'] > pq[nod]: pq.updateitem(nod, -wt['weight']) 
        
        if len(pq)>0:
            ''' Selection Step'''
            top = pq.top()
            vis.add(top)
            curr = pq.pop()
            ''' Insertion Step'''
            loc,cost = minCost(G,path,top)
            '''Insert into the location found by minCost()'''
            path.insert(loc, top)
            tot_weight += cost
            
    return path,tot_weight
Example No. 25
def dijkstra(g, s):
    vis = [False for i in range(len(g))]
    dist = [float('inf') for i in range(len(g))]
    prev = [None for i in range(len(g))]
    dist[s] = 0

    ipq = PQDict()
    ipq.additem(s, 0)
    while len(ipq) != 0:
        index, minValue = ipq.popitem()
        vis[index] = True
        # small optimization: if we already have a better distance to this node
        # via another path, skip it
        if dist[index] < minValue:
            continue
        for edge in g[index]:
            # consider only the unvisited neighboring nodes
            if vis[edge[0]]:
                continue
            # compute the candidate distance
            newDist = dist[index] + edge[1]
            # and update it only if it is smaller than the one already stored
            if newDist < dist[edge[0]]:
                dist[edge[0]] = newDist
                # also update the indexed priority queue: add the node if it is
                # not there yet, otherwise update its value
                if ipq.get(edge[0]) is None:
                    ipq.additem(edge[0], newDist)
                else:
                    ipq[edge[0]] = newDist
                # record the predecessor each time, so the last one kept is the
                # one with the lowest cost
                prev[edge[0]] = index
    return dist, prev
Example No. 26
def primMST(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()            
    Gprime = nx.Graph()
    
    ''' Add all nodes to PQDict with infinite distance'''
    for node in G.nodes():
        pq.additem(node, float("inf"))
    
    curr = pq.pop()    #Select initial node
    vis.add(curr)
    while len(pq) > 0:
        for s,nod, wt in G.edges(curr, data=True):
            if nod not in vis and wt['weight'] < pq[nod]: pq.updateitem(nod, wt['weight']) 
        
        if len(pq)>0:            
            top = pq.top()
            source, destination, dist = [data for data in sorted(G.edges(top, data=True), key=lambda edge: edge[2]['weight']) if data[1] in vis][0]
            Gprime.add_edge(source, destination, weight = dist['weight'])
            vis.add(top)
            tot_weight += pq[top]
            curr = pq.pop()
            
    return Gprime, tot_weight
Example No. 27
def dijkstra(source):
    distResults = [INF] * (N + 1)
    distResults[source] = 0
    dist = PQDict()
    for i in range(1, N + 1):
        dist[i] = distResults[i]

    while dist:
        v1, d1 = dist.popitem()
        distResults[v1] = d1
        if d1 == INF:
            break
        for v2 in range(1, N + 1):
            if v2 in dist:
                dist[v2] = min(dist[v2], graph[v1, v2] + d1)
    return distResults
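The dijkstra() above relies on module-level globals: N (number of nodes), INF, and a dense `graph` dict keyed by (v1, v2) pairs. A hypothetical setup illustrating that contract:

from pqdict import PQDict

INF = float('inf')
N = 4
graph = {(i, j): INF for i in range(1, N + 1) for j in range(1, N + 1)}
for v1, v2, w in [(1, 2, 3), (2, 3, 1), (1, 3, 7), (3, 4, 2)]:
    graph[v1, v2] = w
    graph[v2, v1] = w   # undirected

print(dijkstra(1))   # [inf, 0, 3, 4, 6]  (index 0 is unused)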
Example No. 28
def greedy_approx(G):
    vis = set()
    pq = PQDict()
    tot_length = 0
    seq = []
    start = 1
    curr = start
    vis.add(curr)
    seq.append(curr)
    while len(seq) != len(G.nodes()):
        next = [
            edge for edge in (
                sorted(G.edges(curr, data=True),
                       key=lambda e: e[2]['weight']))
            if edge[1] not in vis
        ][0]
        curr = next[1]
        vis.add(curr)
        seq.append(curr)
        tot_length += next[2]['weight']

    next = [
        edge
        for edge in (sorted(G.edges(curr, data=True),
                            key=lambda e: e[2]['weight']))
        if edge[1] == start
    ][0]
    seq.append(next[1])
    tot_length += next[2]['weight']
    #     print optimal, tot_length, tot_length/float(int(optimal))
    return tot_length


# print greed()[1]
Example No. 29
	def apply_astar(self):
		print "A star algorithm running to find the goal"
		close = set()
		open = set()
		open.add(self.return_key(self._tiles))
		
		parent = {}
		moves = {}
		
		g_score = defaultdict(lambda: math.inf)
		g_score[self.return_key(self._tiles)] = 0
		
		f_score = PQDict.minpq()
		f_matric = {}
		value = g_score[self.return_key(self._tiles)] + self.heuristic(self._tiles,self.end_index)
		
		f_score[self.return_key(self._tiles)] = value
		f_matric[self.return_key(self._tiles)] = self._tiles
		
		while open:
			temp_current_key = f_score.pop()
			current = f_matric[temp_current_key]
			
			if temp_current_key == self.return_key(self.end):
				print("goal found")
				self.construct_path(parent, moves, temp_current_key)
				print("Number of closed states are ", len(close))
				return 1

			if temp_current_key in open:
				open.discard(temp_current_key)
				
			
			close.add(self.return_key(current))
			
			l = self.cal_child_nodes(current)
			moves = {}
			for x in l:
				temp_node = copy.copy(l[x][0])
				if self.return_key(temp_node) in close:
					continue
				new_g_value = g_score[self.return_key(current)] + 1
				if self.return_key(temp_node) not in open or new_g_value < g_score[self.return_key(temp_node)]:
					parent[self.return_key(temp_node)] = [self.return_key(current),l[x][1]]
					moves[self.return_key(temp_node)] = l[x][1]
					g_score[self.return_key(temp_node)] = new_g_value
					value_new = g_score[self.return_key(temp_node)] + self.heuristic(temp_node,self.end_index)
					f_score[self.return_key(temp_node)] = value_new
					
					if self.return_key(temp_node) not in open:
						open.add(self.return_key(temp_node))
						
					if self.return_key(temp_node) not in f_matric:
						f_matric[self.return_key(temp_node)] = temp_node
		
		return 0
Example No. 30
    def __init__(self):

        self.query = input("Enter search query: ")
        self.webpages_limit = input(
            "Set total number of webpages to be crawled: ")
        self.limit = input(
            "Set limits on how many webpages be crawled from single site: ")
        self.priority_queue = PQDict.maxpq()
        self.queue = queue.Queue()
        self.downloader = Downloader()
        self.parser = Parser(self.query)
        self.calculator = Calculator(self.query)
        self.relevance = Relevance()
        self.webpages_crawled = 0
        self.logger = logging.getLogger(__name__)
        self.visited_urls = set()
        self.sites_times = {}
Example No. 31
    def run(self):
        self.heapdict = PQDict()
        self.data = {}
        initx = np.linspace(0, 1, self.init_points)
        n_points = self.init_points
        if self.measure_at is not None:
            xm = [(x - self.a) / (self.b - self.a) for x in self.measure_at]
            initx = np.concatenate((initx, xm))
            initx.sort()
            n_points += len(self.measure_at)

        #initdelta = (initx[1] - initx[0])/2.
        inity = []
        for x in initx:
            xret = self.scale_to_x(x)
            yret = self.f(xret)
            yield (xret, yret)

            inity.append(self.get_y(yret))

        if self.yscale is None:
            self.yscale = max(inity) - min(inity)

        #self.scale_from_y = lambda y: (y-min(inity))/(max(inity)-min(inity))
        self.scale_from_y = lambda y: (y) / self.yscale / self.scalefactor
        inity = [self.scale_from_y(y) for y in inity]

        for (i, x) in enumerate(initx[1:-1], 1):
            d = [
                inity[i], initx[i - 1], inity[i - 1], initx[i + 1],
                inity[i + 1]
            ]

            self.update_data(x, d)

        while n_points < self.max_points:
            try:
                x = self.heapdict.top()

            except KeyError:
                return
            else:
                d = self.data[x]
                for point in self.evalx(x, *d):
                    yield point
                n_points += 2
    def test_destructive_iteration(self):
        for trial in range(100):
            size = random.randrange(1,50)
            items = generateData('float', size)
            dkeys, pkeys = zip(*items)

            if trial & 1:     # Half of the time, heapify using the constructor
                pq = PQDict(items)
            else:             # The rest of the time, insert items sequentially
                pq = PQDict()
                for dkey, pkey in items:
                    pq[dkey] = pkey

            # NOTE: heapsort is NOT a stable sorting method, so dkeys with equal priority keys
            # are not guaranteed to have the same order as in the original sequence.
            pkeys_heapsorted = list(pq.iterprioritykeys())
            self.assertEqual(pkeys_heapsorted, sorted(pkeys))
    def test_fromkeys(self):
        # assign same value to all
        seq = ['foo', 'bar', 'baz']
        pq = PQDict.fromkeys(seq)
        for k in pq:
            self.assertEqual(pq[k], float('inf'))
        pq = PQDict.fromkeys(seq, 10)
        for k in pq:
            self.assertEqual(pq[k], 10)
        pq = PQDict.fromkeys(seq, maxpq=True)
        for k in pq:
            self.assertEqual(pq[k], float('-inf'))

        # use function to calculate pkeys
        seq = [ (1,2), (1,2,3,4), ('foo', 'bar', 'baz') ]
        pq = PQDict.fromkeys(seq, sort_by=len)
        self.assertEqual(pq[1,2], 2)
        self.assertEqual(pq['foo', 'bar', 'baz'], 3)
        self.assertEqual(pq[1,2,3,4], 4)
Example No. 34
def prim(G):
    """ Return MST of the given undirected graph"""
    sumWeight = 0

    heap = PQDict()
    for u in G:
        heap[u] = float("inf")
    flag = [False] * len(G)
    heap[0] = 0
    #parents is the MST
    # parents = {}
    # parents[0] = 0
    while len(heap) != 0:
        [u, value] = heap.popitem()
        flag[u] = True
        sumWeight += value
        for v in G[u]:
            if flag[v] is False:
                # parents[v] = u
                heap[v] = min(heap[v], G[u][v])
    return sumWeight
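A usage sketch for prim() above (hypothetical data): G is an adjacency dict keyed by the integers 0..n-1, with G[u][v] holding the edge weight, and the return value is the total MST weight.

G = {0: {1: 1, 2: 4},
     1: {0: 1, 2: 2},
     2: {0: 4, 1: 2, 3: 3},
     3: {2: 3}}
print(prim(G))   # 6  (MST edges 0-1, 1-2, 2-3)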
Example No. 35
def djp(grafo, n):
    # The starting node is chosen at random, as a random index over the
    # number of nodes
    inicio = random.randint(0, n - 1)
    print("Starting node: ", inicio)
    visitado = set()
    camino_mst = []
    # A priority queue is created to produce the optimized ordering of the
    # path results
    cola_prioridad = PQDict()
    actual = inicio
    return recorre_djp(camino_mst, grafo, actual, cola_prioridad, visitado)
    def test_updates_and_deletes(self):
        pq = PQDict()

        items = generateData('int')
        dkeys, pkeys = zip(*items)

        # heapify a sequence of items
        pq = PQDict(items)

        for oper in range(100):
            if oper & 1: #update random item
                dkey = random.choice(dkeys)
                p_new = random.randrange(25)
                pq[dkey] = p_new
                self.assertTrue(pq[dkey]==p_new)
            elif pq: #delete random item
                dkey = random.choice(list(pq.keys()))
                del pq[dkey]
                self.assertTrue(dkey not in pq)
            self.check_heap_invariant(pq)
            self.check_index(pq)
Example No. 37
def AStar(problem):
	#Give path of data file
	path = problem.datapath
	#Data
	data = list()
	data = readFunc(path)

	#Initialization of a node.
	node = Node()
	node.state = problem.initState
	node.pathcost = 0

	#Frontier is a priority queue dict of tuples:(node.pathcost, node), with key:node.state
	#Frontier is ordered by path cost.
	frontier = PQDict()
	frontier.additem(node.state, (node.pathcost,node))

	#Explored begins as the empty set.
	explored = set()

	while True:
		if len(frontier) == 0:
			return 'No route exists'
		node = frontier.popitem()[1][1] #Chooses the lowest-cost node in frontier
		if problem.goalState == node.state:
			return solution(node,problem)
		explored.add(node.state)

		possibleActions = findPossibleActions(node.state,data)
		for action in range(len(possibleActions)):
			child = childNode(problem,node,possibleActions[action],data)
			if (child.state not in explored) and (child.state not in frontier):
				frontier.additem(child.state, (child.pathcost, child))
			elif (child.state in frontier) and (frontier[child.state][1].pathcost > child.pathcost):
				frontier[child.state] = (child.pathcost, child)
Example No. 38
def main():

    # Main output dict
    output = {'results': []}

    # Read JSON from file
    with open('input.json', 'r') as f:
        inputjson = json.load(f)

    profiles = inputjson['profiles']
    for profile in profiles:
        pq = PQDict()
        no_of_profiles = 0
        matches = []

        # The output for this profile in JSON format
        profile_output = {'profileId': profile['id'], 'matches': []}

        for other_profile in profiles:
            if other_profile['id'] == profile['id']:
                # dont calculate against our own profile
                continue
            # Calculate match percentage with OKCupid's formula
            match_score = math.sqrt(
                satisfaction(profile, other_profile) *
                satisfaction(other_profile, profile))

            # Keep only the highest-scoring matches in a min-heap (PQDict)
            if len(pq) < 11:
                pq.additem(other_profile['id'], match_score)
            elif match_score > pq.topitem()[1]:
                # new score beats the current minimum: replace it
                pq.popitem()
                pq.additem(other_profile['id'], match_score)

        for i in range(len(pq)):
            key, value = pq.popitem()
            temp = {'profileId': key, 'score': value}
            matches.append(temp)

# Reverse the heap and store it in the output for that profile
        profile_output['matches'] = matches[::-1]
        output['results'].append(profile_output)

    # Write out the output JSON to file
    with open('output_optimized.json', 'w') as outf:
        json.dump(output, outf, indent=1)

    return 0
Example No. 39
def prim(G, start):
    """Function recives a graph and a starting node, and returns a MST"""
    stopN = G.number_of_nodes() - 1
    current = start
    closedSet = set()
    pq = PQDict()
    mst = []

    while len(mst) < stopN:
        # print " "
        # print "Current node :", current
        for node in G.neighbors(current):
            if node not in closedSet and current not in closedSet:
                # print "    neigbors: ", node
                if (current, node) not in pq and (node, current) not in pq:
                    w = G.edge[current][node]['weight']
                    pq.additem((current, node), w)

        closedSet.add(current)

        tup, wght = pq.popitem()
        while (tup[1] in closedSet):
            tup, wght = pq.popitem()
        mst.append(tup)
        current = tup[1]

    return mst
def branchAndBound(G, cutoff, ftrace):
    queue = PQDict()
    bestSolution = INFINITY
    bestTour = []
    totalNodes = len(G.nodes())
    startNode = G.nodes()[0]
    nodeList = G.nodes()
    nodeList.remove(startNode)
    queue.additem(tuple([startNode]), lowerBound(G, [startNode]))

    start_time = time.time()
    while len(queue) != 0:
        coveredNodes, lowerBoundCurrent = queue.popitem()

        elapsed_time = time.time() - start_time
        if elapsed_time > cutoff:
            if bestSolution == INFINITY:
                return [], -1
            return bestTour, bestSolution

        for neighbor in G.neighbors(coveredNodes[-1]):
            if not neighbor in coveredNodes:
                tempNodes = list(coveredNodes)
                tempNodes.append(neighbor)
                if len(tempNodes) == totalNodes:
                    cost = findCost(G, tempNodes) + G.get_edge_data(neighbor, startNode)["weight"]
                    if cost < bestSolution:
                        bestSolution = cost
                        bestTour = tempNodes
                        ftrace.write("{0:.2f}".format(elapsed_time * 1.0) + "," + str(bestSolution) + "\n")
                else:
                    tempLowerBound = lowerBound(G, tempNodes)
                    if tempLowerBound < bestSolution:
                        queue.additem(tuple(tempNodes), tempLowerBound)
    return bestTour, bestSolution
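branchAndBound() above assumes findCost() and lowerBound() helpers that are not shown. A plausible minimal findCost(), summing the weights along a partial tour, is sketched below (hypothetical; lowerBound() would similarly bound the cost of completing the tour):

def findCost(G, nodes):
    # total weight of the path visiting `nodes` in the given order
    return sum(G.get_edge_data(u, v)["weight"]
               for u, v in zip(nodes[:-1], nodes[1:]))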
Example No. 41
def dijkstra(G, src, dst=None):
    inf = float('inf')
    D = {src: 0}  # distance
    Q = PQDict(D)  # priority queue
    P = {}  # predecessor
    U = set(G.keys())  # unexplored nodes

    while U:  # still have unexplored
        (v, d) = Q.popitem()  # get node with least d
        D[v] = d  # add to D dict
        U.remove(v)  # node now explored
        if v == dst:  # reached destination
            break

        # now edges FROM v
        for w in G[v]:
            if w in U:  # unvisited neighbour
                tmpd = D[v] + G[v][w]
                if tmpd < Q.get(w, inf):  # return inf if not found
                    Q[w] = tmpd  # update distance
                    P[w] = v  # set predecessor as current
    return D, P
Example No. 42
    def prim(self, start):
        p_queue = PQDict()
        prev_vert = {}
        key = {}

        for vert in self.graph:
            prev_vert[vert] = None
            key[vert] = 1000
        key[start] = 0

        for vert in self.graph:
            p_queue[vert] = key[vert]

        while p_queue:
            current = p_queue.popitem()[0]

            for vert in self.graph[current]:
                if vert in p_queue and self.graph[current][vert] < key[vert]:
                    prev_vert[vert] = current
                    key[vert] = self.graph[current][vert]
                    p_queue[vert] = self.graph[current][vert]

        return prev_vert
Example No. 43
def dijkstra(G, start, end=None):
    inf = float('inf')
    D = {start: 0}  # mapping of nodes to their dist from start
    Q = PQDict(D)  # priority queue for tracking min shortest path
    P = {}  # mapping of nodes to their direct predecessors
    U = set(G.keys())  # unexplored nodes

    while U:  # nodes yet to explore
        (v, d) = Q.popitem()  # node w/ min dist d on frontier
        D[v] = d  # est dijkstra greedy score
        U.remove(v)  # remove from unexplored
        if v == end: break

        # now consider the edges from v with an unexplored head -
        # we may need to update the dist of unexplored successors
        for w in G[v]:  # successors to v
            if w in U:  # then w is a frontier node
                d = int(D[v]) + int(G[v][w])  # dgs: dist of start -> v -> w
                if d < Q.get(w, inf):
                    Q[w] = d  # set/update dgs
                    P[w] = v  # set/update predecessor

    return D, P
    def test_infpkey(self):
        dkeys = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
        pkeys = [1, 2, 3, 4, 5, 6, 7]
        pq = PQDict(zip(dkeys, pkeys))
        pq.additem('top', -float('inf'))
        pq.additem('bot', float('inf'))
        dkeys_sorted = [key for key in pq.iterkeys()]
        self.assertEqual(dkeys_sorted[0], 'top')
        self.assertEqual(dkeys_sorted[-1], 'bot')
Example No. 45
    def plan(self, start_state, goal_state):
        #PQ = pqdict()
        V = {}

        self.goal_state = goal_state
        h0 = self.heuristic(start_state, goal_state)
        n0 = node(start_state, None, None, 0, h0)

        key0 = np.around(n0.state, decimals=1).tostring()
        PQ = PQDict({key0: n0})

        i = 0
        while PQ and i < 100000:
            current = PQ.popitem()[1]
            #print '\n'
            #print current.state[2]
            if (self.state_is_equal(current.path, goal_state)):
                path = self.reconstruct_path(current)
                return (path, current.f)

            V[np.around(current.state,
                        decimals=1).tostring()] = copy.deepcopy(current)

            #get children
            children = self.getChildren(
                current)  #do set parent, should return an array of nodes

            for child in children:
                i += 1
                if i % 100 == 0:
                    print('A* iteration ' + str(i))

                child_key = np.around(child.state, decimals=1).tostring()
                if child_key in V:
                    if child.f >= V[child_key].f:
                        continue

                if (child_key in PQ):
                    existing_child = PQ[child_key]
                    if (child.g >= existing_child.g):
                        continue
                    else:
                        PQ.updateitem(child_key, child)
                else:
                    #print child.state, current.state
                    #pdb.set_trace()
                    #if(child.state[2] < 0):
                    #    pdb.set_trace()
                    PQ.additem(child_key, child)

        print('A* Failed')
        return (None, None)
Example No. 46
    def dijkstra(self, src, dst):
        """dijkstras algorithm used to calculate the shortest path between two nodes a Priority queue is used in this implementation"""
        inf = float('inf')
        D = {src: 0}
        Q = PQDict(D)
        P = {}
        U = set(self.router_list)

        while U:
            (v, d) = Q.popitem()
            D[v] = d
            U.remove(v)
            if str(v) == dst:
                break

            for w in self.router_list[v]:
                if w in U:

                    d = D[v] + self.router_list[v][w]
                    if d < Q.get(w, inf):
                        Q[w] = d
                        P[w] = v

        return D, P
Example No. 47
def main():

	# Main output dict
	output = {'results': []}

	# Read JSON from file
	with open('input.json', 'r') as f:
		inputjson = json.load(f)

	profiles = inputjson['profiles']
	for profile in profiles:
		pq = PQDict()
		no_of_profiles = 0
		matches=[]
        
		# The output for this profile in JSON format 
		profile_output = {'profileId': profile['id'],
				'matches': []}

		for other_profile in profiles:
			if other_profile['id'] == profile['id']:
			# dont calculate against our own profile
				continue
			# Calculate match percentage with OKCupid's formula
			match_score = math.sqrt(satisfaction(profile, other_profile) * satisfaction(other_profile, profile))

			# Keep only the highest-scoring matches in a min-heap (PQDict)
			if len(pq) < 11:
				pq.additem(other_profile['id'], match_score)
			elif match_score > pq.topitem()[1]:
				# new score beats the current minimum: replace it
				pq.popitem()
				pq.additem(other_profile['id'], match_score)
	
		for i in range(len(pq)):
			key,value = pq.popitem()
			temp = {'profileId': key,
				'score': value}
			matches.append(temp)

		# Reverse the heap and store it in the output for that profile
		profile_output['matches'] = matches[::-1]
		output['results'].append(profile_output)

	# Write out the output JSON to file
	with open('output_optimized.json', 'w') as outf:
		json.dump(output, outf, indent=1)

	return 0
Example No. 48
def primWeight(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()

    for node in G.nodes():
        pq.additem(node, float("inf"))

    curr = pq.pop()
    vis.add(curr)
    while len(pq) > 0:
        for s, nod, wt in G.edges(curr, data=True):
            if nod not in vis and wt['weight'] < pq[nod]:
                pq.updateitem(nod, wt['weight'])

        if len(pq) > 0:
            top = pq.top()
            vis.add(top)
            tot_weight += pq[top]
            curr = pq.pop()
    return tot_weight
Example No. 49
    def run(self):
        self.heapdict = PQDict()
        self.data = {}
        initx = np.linspace(0, 1, self.init_points)
        n_points = self.init_points
        if self.measure_at is not None:
            xm = [(x-self.a)/(self.b-self.a) for x in self.measure_at]    
            initx = np.concatenate((initx, xm))
            initx.sort()
            n_points += len(self.measure_at)
            
        #initdelta = (initx[1] - initx[0])/2.
        inity = []
        for x in initx:
            xret = self.scale_to_x(x)
            yret = self.f(xret)
            yield (xret, yret)
           
            inity.append(self.get_y(yret))
            
        if self.yscale is None:
            self.yscale = max(inity)-min(inity)
            
        #self.scale_from_y = lambda y: (y-min(inity))/(max(inity)-min(inity)) 
        self.scale_from_y = lambda y: (y) / self.yscale / self.scalefactor
        inity = [self.scale_from_y(y) for y in inity]

        for (i, x) in enumerate(initx[1:-1],1):
            d = [inity[i], 
                 initx[i-1], inity[i-1], 
                 initx[i+1], inity[i+1]]
            
            self.update_data(x, d)
        
        
        while n_points < self.max_points:
            try:
                x = self.heapdict.top()
                
            except KeyError:
                return
            else:
                d = self.data[x]
                for point in self.evalx(x, *d): yield point
                n_points += 2
Example No. 50
def primWeight(G):
    """ Return MST of the given undirected graph"""
    vis = set()
    tot_weight = 0
    pq = PQDict()            
    
    for node in G.nodes():
        pq.additem(node, float("inf"))
    
    curr = pq.pop()
    vis.add(curr)
    while len(pq) > 0:
        for s,nod, wt in G.edges(curr, data=True):
            if nod not in vis and wt['weight'] < pq[nod]: pq.updateitem(nod, wt['weight']) 
        
        if len(pq)>0:
            top = pq.top()
            vis.add(top)
            tot_weight += pq[top]
            curr = pq.pop()
    return tot_weight
Example No. 51
    def plan(self, start_state, goal_state):
        #PQ = pqdict()
        V = {}

        self.goal_state = goal_state
        h0 = self.heuristic(start_state, goal_state)
        n0 = node(start_state, None, None, 0, h0)

        key0 = np.around(n0.state, decimals = 1).tostring()
        PQ = PQDict({key0: n0})

        i = 0 
        while PQ and i < 100000:
            current = PQ.popitem()[1]
            #print '\n'
            #print current.state[2]
            if(self.state_is_equal(current.path, goal_state)):
                path = self.reconstruct_path(current)
                return (path, current.f)

            V[np.around(current.state, decimals = 1).tostring()] = copy.deepcopy(current)

            #get children
            children = self.getChildren(current)#do set parent, should return an array of nodes
            
            for child in children:
                i += 1
                if i%100 == 0:
                    print('A* iteration ' + str(i))
                
                child_key = np.around(child.state, decimals = 1).tostring()
                if child_key in V:
                    if child.f >= V[child_key].f:
                        continue
                
                if (child_key in PQ):
                    existing_child = PQ[child_key]
                    if(child.g >= existing_child.g):
                        continue
                    else:
                        PQ.updateitem(child_key, child)
                else:
                    #print child.state, current.state
                    #pdb.set_trace()
                    #if(child.state[2] < 0):
                    #    pdb.set_trace()
                    PQ.additem(child_key,child)
        
        print('A* Failed')
        return (None, None)
    def test_pop(self):
        # pop selected item - return pkey
        pq = PQDict(A=5, B=8, C=1)
        pkey = pq.pop('B')
        self.assertEqual(pkey, 8)
        pq.pop('A')
        pq.pop('C')
        self.assertRaises(KeyError, pq.pop, 'A')
        self.assertRaises(KeyError, pq.pop, 'does_not_exist')
        # no args and empty - throws
        self.assertRaises(KeyError, pq.pop)  # pq is now empty
        # no args - return top dkey
        pq = PQDict(A=5, B=8, C=1)
        self.assertEqual(pq.pop(), 'C')
Example No. 53
def branchAndBound(G, cutoff, ftrace):
    queue = PQDict()
    bestSolution = INFINITY
    bestTour = []
    totalNodes = len(G.nodes())
    startNode = G.nodes()[0]
    nodeList = G.nodes()
    nodeList.remove(startNode)
    queue.additem(tuple([startNode]), lowerBound(G, [startNode]))

    start_time = time.time()
    while (len(queue) != 0):
        #         print count
        #         count+=1
        coveredNodes, lowerBoundCurrent = queue.popitem()
        #         coveredNodes=list(coveredNodes)

        elapsed_time = time.time() - start_time
        if elapsed_time > cutoff:
            if bestSolution == INFINITY:
                return -1, []
            return bestTour, bestSolution

        for neighbor in G.neighbors(coveredNodes[-1]):
            if not neighbor in coveredNodes:
                tempNodes = list(coveredNodes)
                tempNodes.append(neighbor)
                if (len(tempNodes) == totalNodes):
                    cost = findCost(G, tempNodes) + G.get_edge_data(
                        neighbor, startNode)['weight']
                    if (cost < bestSolution):
                        bestSolution = cost
                        bestTour = tempNodes
                        ftrace.write("{0:.2f}".format(elapsed_time * 1.0) +
                                     ',' + str(bestSolution) + '\n')
                else:
                    tempLowerBound = lowerBound(G, tempNodes)
                    if tempLowerBound < bestSolution:
                        queue.additem(tuple(tempNodes), tempLowerBound)
#                     else:
#                         print 'prune'
    return bestTour, bestSolution
Example No. 54
def prim(G: "networx Graph object", start: "1X2 Tuple(Node)") -> (list, list):
    """
    Function recives a graph and a starting node, 
    and returns the MST and the history of the algorithm
    """
    MST_len = G.number_of_nodes() - 1
    current = start
    visited = set()
    # Priority Queue
    #(keeps the item with the lowest value on the top -
    #in this case the weight of an edge)
    pq = PQDict()
    # Minimal spanning tree
    mst = []
    steps = []
    history = []
    # While the MST has N - 1 edges (N is the total nodes)
    while len(mst) < MST_len:
        # Get the neighbors
        # history.append((current, G.neighbors(current)))
        for node in G.neighbors(current):
            if node not in visited and current not in visited:
                # Append the history
                steps.append((current, node))
                if (current, node) not in pq and (node, current) not in pq:
                    # plain indexing replaces G.edge[...], which networkx 2.x removed
                    w = G[current][node]['weight']
                    pq.additem((current, node), w)
        # We have visited the node
        visited.add(current)
        # Tup is the edge "(X, Y)", wght is the cost
        tup, wght = pq.popitem()
        # Get the lowest edge if we haven't visited the node
        while (tup[1] in visited):
            tup, wght = pq.popitem()
        # Append the edge to the minimum spanning tree
        mst.append(tup)
        history.append([tup, steps])
        steps = list()
        # Update the current node to the one we visit based on the minimal weight
        current = tup[1]

    return mst, history
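
# --- Hedged usage sketch (not part of the original module) ----------------------
# Feeding prim() a small weighted graph; edge weights are arbitrary demo values.
import networkx as nx

demo = nx.Graph()
demo.add_weighted_edges_from([
    ('A', 'B', 4), ('A', 'C', 1),
    ('B', 'C', 2), ('B', 'D', 5),
    ('C', 'D', 8),
])
mst, history = prim(demo, 'A')
print(mst)  # expected: [('A', 'C'), ('C', 'B'), ('B', 'D')], total weight 8
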
def executa_prim(G, primeiro_no):

    no_de_parada = G.number_of_nodes() - 1
    atual = primeiro_no
    visitados = set()  # Drops repeated nodes and gives hash-based lookups
    min_heap = PQDict()  # Acts as a min-heap
    mst = []
    peso_total = 0
    pesos = []

    while len(mst) < no_de_parada:
        # print("Esse é o visitados", visitados)
        # print("Esse é o min_heap: ", min_heap)
        for noVizinho in G.neighbors(atual):
            # print("Esse é o no: ", noVizinho)
            # print("Esse é o atual: ", atual)
            if noVizinho not in visitados and atual not in visitados:
                if (atual,
                        noVizinho) not in min_heap and (noVizinho,
                                                        atual) not in min_heap:
                    peso_aresta = G[atual][noVizinho]['weight']
                    min_heap.additem((atual, noVizinho), peso_aresta)

        visitados.add(atual)

        aresta, peso = min_heap.popitem()  # Pop the root (minimum-weight edge)
        while (aresta[1] in visitados):  # aresta[1] is the neighbor endpoint
            # Keep popping the root until we reach an edge whose endpoint is unvisited
            aresta, peso = min_heap.popitem()
            # print("Current edge:", aresta)
            # print("Current weight:", peso)
        peso_total += peso
        pesos.append(peso)
        mst.append(aresta)
        atual = aresta[1]

    return mst, peso_total, pesos
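
# --- Hedged usage sketch (not part of the original module) ----------------------
# executa_prim returns the MST edges, the total weight, and the per-edge weights.
import networkx as nx

demo = nx.Graph()
demo.add_weighted_edges_from([(1, 2, 3), (1, 3, 1), (2, 3, 2), (2, 4, 4)])
mst, peso_total, pesos = executa_prim(demo, 1)
print(mst, peso_total, pesos)  # expected: [(1, 3), (3, 2), (2, 4)] 7 [1, 2, 4]
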
Exemplo n.º 56
0
    def rout(self, start, desti, edge_desty, vedge_desty, nodes_order, U, G2,
             points, speed_dict):
        path = []
        Q, P_hat = [], []
        neigh = list(G2.successors(start))
        print('neigh %d' % len(neigh))
        start_ax = points[start]
        desti_ax = points[desti]
        s_d_arr = [desti_ax[0] - start_ax[0], desti_ax[1] - start_ax[1]]
        all_expire = 0.0

        def get_weight(p_hat):
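            # Hedged reading: fetch the travel-time distribution for p_hat, preferring
            # the edge table (edge_desty), then speed_dict, then the virtual-edge
            # table (vedge_desty).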
            w_p_hat = {}
            if p_hat in edge_desty:
                w_p_hat = edge_desty[p_hat]
            elif p_hat in speed_dict:
                w_p_hat = speed_dict[p_hat]
            elif p_hat in vedge_desty:
                w_p_hat = vedge_desty[p_hat]
            return w_p_hat

        def get_maxP(w_p_, vv):
            p_max = 0.0
            for pk in w_p_:
                w_pk = w_p_[pk]
                pk_ = int(int(pk) / self.sigma)
                if int(pk) % self.sigma == 0: pk_ += 1
                p_max += float(w_pk) * U[vv][pk_]
            return p_max

        has_visit = set()
        has_visit.add(start)
        Que = PQDict.maxpq()
        Q = {}
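        # Hedged reading: Que is a max-priority queue keyed by candidate path, with the
        # path's probability score as priority; Q mirrors each key with the full
        # (p_max, weight distribution, cost_time) tuple so a pop from Que can recover it.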
        for vi in neigh:
            if vi in has_visit: continue
            else: has_visit.add(vi)
            p_hat = start + '-' + vi
            w_p_hat = get_weight(p_hat)
            w_min = min([float(l) for l in w_p_hat.keys()])
            p_order = nodes_order[vi]  #p_hat]
            tcost1 = time.time()
            inx_min = np.argwhere(U[vi] > 0)
            if len(inx_min) == 0:
                #print('u 0 vd: %s %s'%(vi, desti))
                continue
            inx_min = inx_min[0][0]
            all_expire += time.time() - tcost1
            cost_time = w_min + inx_min * self.sigma
            if cost_time <= self.T:
                tcost1 = time.time()
                p_max = get_maxP(w_p_hat, vi)
                all_expire += time.time() - tcost1
                Que[p_hat] = p_max
                Q[p_hat] = (p_max, w_p_hat, cost_time)
        #print('len Q %d'%len(Q))
        QQ = {}
        p_best_p, flag = 'none', False
        p_max_m, p_best_cost, p_w_p = -1, -1, -1
        if len(Q) == 0: return 'none1', -1, -1, -1, all_expire, -1
        all_rounds = 0
        while len(Q) != 0:
            (p_hat, pqv) = Que.popitem()
            all_rounds += 1
            (p_max, w_p_hat, cost_time) = Q[p_hat]
            del Q[p_hat]
            a = p_hat.rfind('-')
            v_l = p_hat[a + 1:]
            if v_l == desti:
                p_best_p = p_hat
                p_max_m = p_max
                p_best_cost = cost_time
                p_w_p = w_p_hat
                flag = True
                break
            neigh = list(G2.successors(v_l))
            cost_sv = min([float(l) for l in w_p_hat.keys()])
            vd_d_arr = [
                points[desti][0] - points[v_l][0],
                points[desti][1] - points[v_l][1]
            ]
            for u in neigh:
                if u == desti:
                    vu = v_l + '-' + u
                    w_vu = get_weight(vu)
                    if len(w_vu) == 0: cost_vu = 0
                    else: cost_vu = min([float(l) for l in w_vu.keys()])
                    tcost1 = time.time()
                    inx_min = np.argwhere(U[u] > 0)
                    inx_min = inx_min[0][0]
                    p_best_p = p_hat + ';' + vu
                    p_w_p = self.conv(w_p_hat, w_vu)
                    p_max_m = get_maxP(w_p_hat, u)
                    all_expire += time.time() - tcost1
                    p_best_cost = cost_sv + cost_vu + inx_min * self.sigma
                    flag = True
                    break
                if u in has_visit:
                    #print('u1 %s, vd %s'%(u, desti))
                    continue
                else:
                    has_visit.add(u)
                if u in p_hat:
                    #print('u2 %s, vd %s'%(u, desti))
                    continue
                vu = v_l + '-' + u
                w_vu = get_weight(vu)
                if len(w_vu) == 0:
                    #print('vu %s, vd %s'%(vu, desti))
                    continue
                cost_vu = min([float(l) for l in w_vu.keys()])
                p_order = nodes_order[u]  #p_hat]
                tcost1 = time.time()
                inx_min = np.argwhere(U[u] > 0)
                if len(inx_min) == 0:
                    #print('inx vu %s, vd %s'%(vu, desti))
                    continue
                inx_min = inx_min[0][0]
                all_expire += time.time() - tcost1
                cost_time = cost_sv + cost_vu + inx_min * self.sigma
                if cost_time <= self.T:
                    p_hat_p = p_hat + ';' + vu
                    w_p_hat_p = self.conv(w_p_hat, w_vu)
                    tcost1 = time.time()
                    p_hat_max = get_maxP(w_p_hat, u)
                    all_expire += time.time() - tcost1
                    QQ[p_hat_p] = (p_hat_max, w_p_hat_p, cost_time)
            if flag: break
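            # Hedged reading: once the current frontier Q is exhausted, promote the
            # next-hop candidates collected in QQ into Q and Que, then reset QQ for the
            # following expansion layer.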
            if len(Q) == 0:
                Q = copy.deepcopy(QQ)
                for qqk in QQ:
                    Que[qqk] = QQ[qqk][0]
                QQ = {}
        return p_best_p, p_max_m, p_best_cost, p_w_p, all_expire, all_rounds
Exemplo n.º 57
0
class Battle(object):
    """
    Fight a battle between two teams of oeo
    """
    _turn_stage_map = {
        8: '02',
        7: '03',
        6: '04',
        5: '05',
        4: '06',
        3: '07',
        2: '08',
        1: '09',
        0: '10',
        -1: '11',
        -2: '12',
        -3: '13',
        -4: '14',
        -5: '15',
        -6: '16',
        -7: '17'
    }

    _turn_spi_map = {
        0: '01',
        1: '02',
        2: '03',
        3: '04',
        4: '05',
        5: '06',
        6: '07',
        7: '08',
        8: '09',
        9: '10',
        10: '11',
        11: '12'
    }

    def __init__(self, oeos, a_id, a, a_max_fielded, b_id, b, b_max_fielded):
        assert all(isinstance(oeo, Oeo) for oeo in oeos.values()), \
            "oeos is not a dict of oeo_id:oeo"
        assert isinstance(a_id, str), "'a_id' is not a string"
        assert isinstance(a, set), "'a' is not a set of oeo_id"
        assert isinstance(a_max_fielded, int), "'a_max_fielded' is not an int"
        assert isinstance(b_id, str), "'b_id' is not a string"
        assert isinstance(b, set), "'b' is not a set of oeo_id"
        assert isinstance(b_max_fielded, int), "'b_max_fielded' is not an int"
        # Throw error if each oeo_id is not unique between teams
        if a & b:
            raise ValueError(f"{a & b} oeo_id(s) not unique between teams")

        self._oeo = oeos
        self._a_id = a_id
        self._a = a
        self._b_id = b_id
        self._b = b

        self._turn_number = 0
        self._field = Field(a_id, a_max_fielded, b_id, b_max_fielded)
        self._pending_sim_events = PQDict()
        self._processed_sim_events = []

        move_set = set()
        for o in self._oeo.values():
            for move in o.moves:
                move_set.add(move)
        moves = Move.load_moves(move_set)
        self._moves = moves

        # self._speed_priority_list = None
        self._setup_axel_events()

    @property
    def teams(self):
        return {self._a_id: self._a, self._b_id: self._b}

    @property
    def field(self):
        return self._field

    def _setup_axel_events(self):
        """
        Initialise axel events
        """
        # sim_output_message(msg)
        self.sim_output_message = Event()

        # event_choose_deployments(team_id, non_fielded_team, empty_positions)
        # non_fielded_team: oeo from the team who are not on the field
        #                   but are conscious
        # empty_positions: empty positions on the field into which an oeo
        #                  could be deployed
        self.event_choose_deployments = Event()

        # event_choose_actions(team_id, oeo_requiring_actions)
        # oeo_requiring_actions: oeo that are on the field and need to select
        #                        an action for the current turn
        self.event_choose_actions = Event()

    def run(self):
        """
        Run the battle

        :return: id of the victor
        :rtype: str
        """
        logger.info(f"{self._a_id} vs {self._b_id}...")
        ta = {oeo_id: self._oeo[oeo_id] for oeo_id in self._a}
        tb = {oeo_id: self._oeo[oeo_id] for oeo_id in self._b}
        logger.debug(f"{self._a_id}'s team:\n{ta}")
        logger.debug(f"{self._b_id}'s team:\n{tb}")

        # Add the BEGIN_TURN SimEvent for turn 1
        self._pending_sim_events.additem(SimEvent(SimEventType.BeginTurn), 1)

        victor = None

        # While there are pending sim events, loop until we break when a
        # battle end condition is met
        while self._pending_sim_events:
            # Remove any oeo with 0 HP from the field
            self._remove_unconscious_oeo()

            # Check teams: if all oeo in the battle are unconscious,
            #               then end battle as a draw
            #              if all oeo in team A are unconscious,
            #               then end battle as win for team B
            #              if all oeo in team B are unconscious,
            #               then end battle as win for team A
            if not any(oeo.conscious for oeo in self._oeo.values()):
                logger.info("All oeo on both sides of the battle are "
                            "unconscious, the battle is a draw")
                victor = "DRAW"
                break
            elif not any(self._oeo[oeo_id].conscious for oeo_id in self._a):
                logger.info(f"{self._a_id}'s team are unconscious, "
                            f"{self._b_id} wins the battle")
                victor = self._b_id
                break
            elif not any(self._oeo[oeo_id].conscious for oeo_id in self._b):
                logger.info(f"{self._b_id}'s team are unconscious, "
                            f"{self._a_id} wins the battle")
                victor = self._a_id
                break

            # Let both sides choose oeo to deploy
            self._choose_deployments()
            logger.debug(f"{self._a_id}'s side: {self._field[self._a_id]}")
            logger.debug(f"{self._b_id}'s side: {self._field[self._b_id]}")

            # Check both sides of the field:
            # if team A side is empty, then end as win for team B
            # if team B side is empty, then end as win for team A
            if self._field[self._a_id].is_empty():
                logger.info(f"{self._a_id} yields, "
                            f"{self._b_id} wins the battle")
                victor = self._b_id
                break
            elif self._field[self._b_id].is_empty():
                logger.info(f"{self._b_id} yields, "
                            f"{self._a_id} wins the battle")
                victor = self._a_id
                break

            # Pop the next event to be processed, add it to the
            # processed events list, and process it
            event, event_priority = self._pending_sim_events.popitem()
            event_complete = 0
            event_type = event.event_type
            if event_type is SimEventType.BeginTurn:
                event_complete = self._process_begin_turn()
            elif event_type is SimEventType.UseMove:
                event_complete = self._process_use_move(**event.data)
            elif event_type is SimEventType.UseItem:
                # event_complete = self._process_use_item
                pass
            elif event_type is SimEventType.Switch:
                # event_complete = self._process_switch_oeo
                pass
            elif event_type is SimEventType.Run:
                # event_complete = self._process_run
                pass
            else:
                raise ValueError(f"Invalid event_type: {event}")

            self._processed_sim_events.append(
                (event_priority, event, event_complete))

        logger.debug(f"Pending events: {self._pending_sim_events}")
        logger.info(f"Processed events: {self._processed_sim_events}")
        return victor

    def _process_begin_turn(self):
        # Increment the turn number and add the BeginTurn SimEvent
        # for the next turn
        self._turn_number += 1
        logger.debug(f"Processing BeginTurn({self._turn_number}) SimEvent")
        self._pending_sim_events.additem(SimEvent(SimEventType.BeginTurn),
                                         self._turn_number + 1)

        # TODO: Update status conditions - burn, poison, landing from flight,
        # then remove unconscious oeo from field

        # Choose the actions for the oeo on the field this turn, calculate the
        # order in which the actions should occur, and add them to the pending
        # sim events priority queue
        self._choose_actions()
        return 1

    def _process_use_move(self, user_id, move_id, target_id):
        logger.debug("Processing UseMove SimEvent")
        user, move, target = self._oeo[user_id], self._moves[move_id], \
            self._oeo[target_id]
        user_is_fielded = self._is_fielded(user_id)
        target_is_fielded = self._is_fielded(target_id)
        if user_is_fielded and target_is_fielded:
            logger.info(f"{user_id} attacks {target_id} using {move_id}")
            df_id = getattr(move, "df_id", "Standard")
            damage_function = get_damage_function(df_id)
            damage = damage_function(user, move, target)
            hp = target.current_hp
            target.current_hp -= damage
            logger.info(f"{target_id}'s HP = {hp}-{damage} "
                        f"= {target.current_hp}")
            return 1
        else:
            logger.debug(f"User on field = {user_is_fielded}, "
                         f"Target on field = {target_is_fielded}")
            return -1

    def _process_use_item(self, item, target):
        logger.debug("Processing UseItem SimEvent")
        return 0

    def _process_switch(self, user, target):
        logger.debug("Processing Switch SimEvent")
        return 0

    def _process_run(self, user, run_type):
        logger.debug("Processing Run SimEvent")
        return 0

    def _calculate_event_priority(self, turn, priority, speed_priority):
        """
        :param turn: the turn in which the event is to be actioned
        :param priority: the stage of the turn in which the event is \
                         to be actioned
        :param speed_priority: the speed_priority of the oeo undertaking \
                               the event
        :return: the priority of the event to be actioned
        """
        return float(f"{turn}.{self._turn_stage_map[priority]}"
                     f"{self._turn_spi_map[speed_priority]}")

    def _remove_unconscious_oeo(self):
        """
        Withdraw unconscious oeo from the field
        """
        for team_id in [self._a_id, self._b_id]:
            oeo_to_remove = [
                oeo_id for oeo_id in self._field[team_id].fielded
                if self._oeo[oeo_id].conscious is False
            ]
            for oeo_id in oeo_to_remove:
                self._field.withdraw(team_id, oeo_id)

    def _is_fielded(self, oeo_id):
        """
        :param oeo_id:
        :return: True if oeo_id is on the field else False
        """
        if (oeo_id in self._field[self._a_id]) or \
                (oeo_id in self._field[self._b_id]):
            return True
        else:
            return False

    def _choose_deployments(self):
        """
        Choose and make deployments to the field
        """
        for team_id in [self._a_id, self._b_id]:
            empty_positions = self._field[team_id].empty_positions
            logger.debug(f"Empty positions on {team_id}'s side: "
                         f"{empty_positions}")
            if empty_positions:
                fielded = self._field[team_id].fielded
                benched = [
                    oeo_id for oeo_id in self.teams[team_id]
                    if oeo_id not in fielded and self._oeo[oeo_id].conscious
                ]
                logger.debug(f"Benched on {team_id}'s side: {benched}")
                if benched:
                    deployments = self._poll_deployments(
                        team_id, benched, empty_positions)
                    logger.debug(f"{team_id}'s oeo to deploy: {deployments}")
                    for position, oeo_id in deployments.items():
                        self._field.deploy(team_id, oeo_id, position)

    def _poll_deployments(self, team_id, non_fielded_team, empty_positions):
        """
        Polls for deployments for team_id
        :return: dict of position:oeo_id
        """
        logger.debug(f"Polling for deployments from {team_id}")
        results = self.event_choose_deployments(team_id,
                                                list(non_fielded_team),
                                                empty_positions)
        flag, result, handler = results[0]
        if flag:
            for position, oeo_id in result.items():
                # Ensure that each oeo is only deployed to one
                # field position at most
                if list(result.values()).count(oeo_id) > 1:
                    raise Exception(f"{oeo_id} can not be deployed to more "
                                    "than one field position")
                # Ensure 0 <= position < len(self._field[team_id])
                if position < 0 or position >= len(self._field[team_id]):
                    raise Exception(f"Position {position} is out of bounds "
                                    f"(0-{len(self._field[team_id]) - 1})")
                # Ensure oeo_id is in self.team[team_id]
                if oeo_id not in self.teams[team_id]:
                    raise Exception(f"{oeo_id} is not in {team_id}'s team")
            return result
        else:
            raise Exception("Exception in choose_deployments handler") \
                from result

    def _choose_actions(self):
        """
        Choose and schedule actions for oeo on the field
        """
        # Create the speed_priority_list for this turn
        oeo_against_speed = [(oeo_id, oeo.speed)
                             for oeo_id, oeo in self._oeo.items()]
        oeo_against_speed.sort(key=itemgetter(1), reverse=True)
        logger.debug(f"Speed Priority List: {oeo_against_speed}")
        speed_priority_list = [t[0] for t in oeo_against_speed]

        a_fielded = self._field[self._a_id].fielded
        b_fielded = self._field[self._b_id].fielded

        # Create action_map dictionary of oeo_id to action:None for
        # fielded oeo and update it from future_action dictionary
        action_map = {
            oeo_id: None
            for oeo_id in itertools.chain(a_fielded, b_fielded)
        }
        # todo: Update action_map from future_actions dictionary
        logger.debug(f"Initial action map for turn {self._turn_number}: "
                     f"{action_map}")

        # Call event_choose_actions for each team for oeo that do not have
        # an action to perform (action is None)
        a_oeo_requiring_actions = [
            oeo_id for oeo_id, action in action_map.items()
            if (oeo_id in a_fielded) and (action is None)
        ]
        a_oeo_requiring_actions.sort(key=lambda x: self._oeo[x].speed,
                                     reverse=True)
        b_oeo_requiring_actions = [
            oeo_id for oeo_id, action in action_map.items()
            if (oeo_id in b_fielded) and (action is None)
        ]
        b_oeo_requiring_actions.sort(key=lambda x: self._oeo[x].speed,
                                     reverse=True)
        logger.debug(f"{self._a_id}'s oeo requiring actions: "
                     f"{a_oeo_requiring_actions}")
        logger.debug(f"{self._b_id}'s oeo requiring actions: "
                     f"{b_oeo_requiring_actions}")
        a_actions = self._poll_actions(self._a_id, a_oeo_requiring_actions)
        b_actions = self._poll_actions(self._b_id, b_oeo_requiring_actions)
        logger.info(f"{self._a_id}'s actions chosen: {a_actions}")
        logger.info(f"{self._b_id}'s actions chosen: {b_actions}")

        # For each {oeo_id: action} in dictionary returned by the event
        # handlers, add it to the action map if the oeo does not already have
        # an action for this turn in the action map
        for oeo_id, action in itertools.chain(a_actions.items(),
                                              b_actions.items()):
            if action_map[oeo_id] is not None:
                raise Exception(f"{oeo_id} already had an action "
                                "for this turn")
            action_map[oeo_id] = action
        logger.debug(f"Final action map for turn {self._turn_number}: "
                     f"{action_map}")

        # For each {oeo_id: action} in the action map add the SimEvent for
        # the action to the pending sim events queue
        for oeo_id, action in action_map.items():
            if action:
                if action.event_type == SimEventType.UseMove:
                    target = action.data["target_id"]
                    move_id = action.data["move_id"]
                    move_priority = self._moves[move_id].priority
                    oeo_priority = speed_priority_list.index(oeo_id)
                    s = SimEvent(SimEventType.UseMove,
                                 user_id=oeo_id,
                                 target_id=target,
                                 move_id=move_id)
                    ep = self._calculate_event_priority(
                        self._turn_number, move_priority, oeo_priority)
                    self._pending_sim_events.additem(s, ep)

                if action.event_type == SimEventType.UseItem:
                    pass

    def _poll_actions(self, team_id, oeo_requiring_actions):
        """
        Polls for actions from team_id
        :return: dict of oeo_id:action
        """
        logger.debug(f"Polling for actions from {team_id}")
        results = self.event_choose_actions(team_id, oeo_requiring_actions)
        flag, result, handler = results[0]
        if flag:
            for oeo_id, action in result.items():
                # Ensure oeo is on the field
                if not self._is_fielded(oeo_id):
                    raise Exception(f"{oeo_id} is not on the field")
                # Ensure oeo is in team_id
                if oeo_id not in self.teams[team_id]:
                    raise Exception(f"{oeo_id} is not on {team_id}'s side")
                # Ensure action is SimEvent
                if not isinstance(action, SimEvent):
                    raise Exception("Action is not a SimEvent")
                # Ensure action.event_type is UseMove, UseItem, Switch or Run
                if action.event_type not in [
                        SimEventType.UseMove, SimEventType.UseItem,
                        SimEventType.Switch, SimEventType.Run
                ]:
                    raise Exception("Action event type not UseMove, UseItem, "
                                    "Switch or Run")
            return result
        else:
            raise Exception("Exception in choose_actions handler") from result
Exemplo n.º 58
0
    def rout(self, start, desti, edge_desty, vedge_desty, nodes_order, U, G,
             points, pred):
        path = []
        Q, P_hat = [], []
        neigh = list(G.successors(start))
        print('neigh %d' % len(neigh))
        start_ax = points[start]
        desti_ax = points[desti]
        s_d_arr = [desti_ax[0] - start_ax[0], desti_ax[1] - start_ax[1]]
        all_expire2 = 0.0

        def get_weight(p_hat):
            w_p_hat = {}
            if p_hat in edge_desty:
                w_p_hat = edge_desty[p_hat]
            elif p_hat in vedge_desty:
                w_p_hat = vedge_desty[p_hat]
            return w_p_hat

        def get_maxP3(w_p_, vv, vi_getmin):
            p_max = 0.0
            for pk in w_p_:
                w_pk = w_p_[pk]
                pk_ = int(int(pk) / self.sigma)
                if int(pk) % self.sigma == 0: pk_ += 1
                if (1.0 * self.T - float(pk)) > vi_getmin:
                    p_max += float(w_pk)
            return p_max

        def get_vigetmin(vv):
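            # Hedged reading: walk the precomputed predecessor chain pred from vv to
            # desti, summing the minimum travel time of each traversed edge, and also
            # report how long the walk itself took.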
            start1 = time.time()
            v = vv
            path_ = [v]
            while v != desti:
                v = pred[v]
                path_.append(v)
            spath = path_[0]
            vi_getmin = 0.0
            for epath in path_[1:]:
                key = spath + '-' + epath
                vi_getmin += min(abs(float(l)) for l in edge_desty[key].keys())
                spath = epath
            expire_time = time.time() - start1
            return vi_getmin, expire_time

        has_visit = set()
        has_visit.add(start)
        QQue = PQDict.maxpq()
        Q = {}
        for vi in neigh:
            if vi in has_visit: continue
            else: has_visit.add(vi)
            p_hat = start + '-' + vi
            w_p_hat = get_weight(p_hat)
            w_min = min([float(l) for l in w_p_hat.keys()])
            p_order = nodes_order[vi]  #p_hat]
            vi_getmin, ex_p = get_vigetmin(vi)
            all_expire2 += ex_p
            cost_time = w_min + vi_getmin

            if cost_time <= self.T:
                p_max = max(list(w_p_hat.values()))
                QQue[p_hat] = p_max
                Q[p_hat] = (p_max, w_p_hat, cost_time)
        print('len Q %d' % len(Q))
        QQ = {}
        p_best_p, flag = 'none', False
        p_max_m, p_best_cost, p_w_p = -1, -1, -1
        if len(Q) == 0: return 'none1', -1, -1, -1, all_expire2, -1
        all_rounds = 0
        while len(Q) != 0:
            (p_hat, pqv) = QQue.popitem()
            all_rounds += 1
            (p_max, w_p_hat, cost_time) = Q[p_hat]
            del Q[p_hat]
            a = p_hat.rfind('-')
            v_l = p_hat[a + 1:]
            if v_l == desti:
                p_best_p = p_hat
                p_max_m = p_max
                p_best_cost = cost_time
                p_w_p = w_p_hat
                flag = True
                break
            neigh = list(G.successors(v_l))
            cost_sv = min([float(l) for l in w_p_hat.keys()])
            vd_d_arr = [
                points[desti][0] - points[v_l][0],
                points[desti][1] - points[v_l][1]
            ]
            for u in neigh:
                if u == desti:
                    vu = v_l + '-' + u
                    w_vu = get_weight(vu)
                    if len(w_vu) == 0: cost_vu = 0
                    else: cost_vu = min([float(l) for l in w_vu.keys()])
                    vi_getmin, ex_p = get_vigetmin(u)
                    all_expire2 += ex_p
                    p_best_p = p_hat + ';' + vu
                    p_w_p = self.conv(w_p_hat, w_vu)
                    p_max_m = max(list(p_w_p.values()))
                    p_best_cost = cost_sv + cost_vu + vi_getmin  #inx_min*self.sigma
                    flag = True
                    break
                if u in has_visit:
                    #print('u1 %s'%u)
                    continue
                else:
                    has_visit.add(u)
                if u in p_hat:
                    #print('u2 %s'%u)
                    continue
                vu = v_l + '-' + u
                w_vu = get_weight(vu)
                if len(w_vu) == 0:
                    #print('vu %s'%vu)
                    continue
                cost_vu = min([float(l) for l in w_vu.keys()])
                p_order = nodes_order[u]  #p_hat]
                vi_getmin, ex_p = get_vigetmin(u)
                all_expire2 += ex_p
                cost_time = cost_sv + cost_vu + vi_getmin  #inx_min*self.sigma
                if cost_time <= self.T:
                    p_hat_p = p_hat + ';' + vu
                    w_p_hat_p = self.conv(w_p_hat, w_vu)
                    p_hat_max = max(list(w_p_hat_p.values()))
                    QQ[p_hat_p] = (p_hat_max, w_p_hat_p, cost_time)

            if flag: break
            if len(Q) == 0:
                Q = copy.deepcopy(QQ)
                for qqk in QQ:
                    QQue[qqk] = QQ[qqk][0]
                QQ = {}
        return p_best_p, p_max_m, p_best_cost, p_w_p, all_expire2, all_rounds