Example #1
File: graphs.py Project: arjunc12/Ants
def check_road_path(road_graph, u, v):
    sp = nx.shortest_path(road_graph, u, v)
    if len(sp) >= 20:
        print("path too long")
        return None
    print("shortest path length", len(sp))
    print("shortest path", sp)
    # Remove each interior edge of the shortest path in turn and look for
    # alternate ("fix") paths around the gap.
    for i in range(1, len(sp) - 1):
        v1, v2 = sp[i], sp[i + 1]
        print(v1, v2)
        road_graph.remove_edge(v1, v2)
        if nx.has_path(road_graph, v1, v2):
            fp = nx.shortest_path(road_graph, v1, v2)
            if 3 < len(fp) < 8:
                print("fix path length", len(fp))
                print("fix path", fp)
        if nx.has_path(road_graph, u, v):
            sp2 = nx.shortest_path(road_graph, u, v)
            if len(sp2) <= 20 and u in sp2 and v in sp2:
                print("new shortest path length", len(sp2))
                print("new shortest path", sp2)
        road_graph.add_edge(v1, v2)
Example #2
    def test_multi_allocate_and_free(self):
        """Assert that resources allocated by flows are freed"""
        SWITCHES = ['sw1', 'sw2']
        SERVERS = ['s1', 's2']
        graph = self.graph
        max_duration = 10
        durations = range(1, max_duration)
        steps = 100
        a = nx.shortest_path(graph, choice(SERVERS), choice(SWITCHES))
        b = nx.shortest_path(graph, choice(SERVERS), choice(SWITCHES))
        paths = [a, b]
        workload = [(choice(paths), choice(durations)) for t in range(steps)]

        ctrls = [LinkBalancerCtrl(['sw1', 'sw2'])]
        sim = LinkBalancerSim(graph, ctrls)

        metric_before_alloc = sim.rmse_links(graph)

        for now, item in enumerate(workload):
            path, dur = item
            sim.free_resources(now)
            sim.allocate_resources(path, 1, now, dur)

        # Free the (up to max_duration) possibly remaining live flows
        for i in range(len(workload), steps + max_duration):
            sim.free_resources(i)

        metric_after_free = sim.rmse_links(graph)

        self.assertEqual(metric_before_alloc, metric_after_free)
        self.assertEqual(len(sim.active_flows), 0)
Example #3
def verify(prog, src_name, dst_name):
    src = prog.subs.find(src_name)
    dst = prog.subs.find(dst_name)
    if src is None or dst is None:
        return None

    graphs = GraphsBuilder()
    graphs.run(prog)
    cg = graphs.callgraph

    if nx.has_path(cg, src.id.number, dst.id.number):
        return ('calls', nx.shortest_path(cg, src.id.number, dst.id.number))

    calls = CallsitesCollector(graphs.callgraph, src.id.number, dst.id.number)

    for sub in prog.subs:
        calls.run(sub)
        cfg = graphs.callgraph.nodes[sub.id.number]['cfg']
        for src in calls.srcs:
            for dst in calls.dsts:
                if src != dst and nx.has_path(cfg, src, dst):
                    return ('sites', nx.shortest_path(cfg, src, dst))
        calls.clear()

    return None
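Note that pairing has_path with shortest_path, as above, runs the same search twice; a minimal single-pass sketch of that guard (hypothetical helper, not from the source project):

import networkx as nx

def path_or_none(graph, src, dst):
    # Compute the path once; absence of a path (or of a node) means None.
    try:
        return nx.shortest_path(graph, src, dst)
    except (nx.NetworkXNoPath, nx.NodeNotFound):
        return None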
Example #4
    def pre_process(self, source_id, sink_id):
        # create grade values for each node
        try:
            nx.shortest_path(self.graph, source=source_id, target=sink_id)
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            # no path between source and destination, so return False
            return False
Example #5
File: Mlst.py Project: zayd/mlst
def helper(T, U, e1, e2):
    # NOTE: Degrees and i are taken from the enclosing scope in the original
    # project; the full version appears as two_edge_swap in Example #26.
    path1 = nx.shortest_path(T, e1[0], e1[1])

    for f1 in zip(path1, path1[1:]):

        U.add_edge(e1[0], e1[1])
        U.remove_edge(f1[0], f1[1])

        path2 = nx.shortest_path(T, e2[0], e2[1])

        for f2 in zip(path2, path2[1:]):

            U.add_edge(e2[0], e2[1])

            if (f2[0], f2[1]) in U.edges():
                U.remove_edge(f2[0], f2[1])

            newDegrees = list(U.degree(U.nodes()).values())

            if newDegrees.count(1) > Degrees.count(1):
                print(newDegrees.count(1))
                print(i)
                T = U.copy()
                Degrees = list(T.degree(T.nodes()).values())

            U.add_edge(f2[0], f2[1])

        U.add_edge(f1[0], f1[1])
Example #6
    def __send_to_who(self, sender, deploy_network, civ_information, cap):
        """
        Function to populate list to whom information should be sent

        """
        send_to = []

        if cap == "shelter":
            for i in range(len(deploy_network)):
                if self.rs[deploy_network[i].id].c_shelter is True and self.__check_in_circle(self.rs[deploy_network[i].id].deploy_range, civ_information.position):
                    path = nx.shortest_path(self.G, sender, deploy_network[i].id)
                    send_to.append(path[1])
        elif cap == "medical":
            for i in range(len(deploy_network)):
                if self.rs[deploy_network[i].id].c_medical is True and self.__check_in_circle(self.rs[deploy_network[i].id].deploy_range, civ_information.position):
                    path = nx.shortest_path(self.G, sender, deploy_network[i].id)
                    send_to.append(path[1])
        elif cap == "food":
            for i in range(len(deploy_network)):
                if self.rs[deploy_network[i].id].c_food is True and self.__check_in_circle(self.rs[deploy_network[i].id].deploy_range, civ_information.position):
                    path = nx.shortest_path(self.G, sender, deploy_network[i].id)
                    send_to.append(path[1])
        elif cap == "logistic":
            for i in range(len(deploy_network)):
                if self.rs[deploy_network[i].id].c_logistic is True and self.__check_in_circle(self.rs[deploy_network[i].id].deploy_range, civ_information.position):
                    path = nx.shortest_path(self.G, sender, deploy_network[i].id)
                    send_to.append(path[1])

        return send_to
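The four branches above differ only in which capability flag they test; a sketch of a table-driven equivalent, assuming the same self.rs, self.G and __check_in_circle members as above (not from the source project):

    def __send_to_who(self, sender, deploy_network, civ_information, cap):
        # Map each capability name to the flag attribute checked on a responder.
        flags = {"shelter": "c_shelter", "medical": "c_medical",
                 "food": "c_food", "logistic": "c_logistic"}
        send_to = []
        flag = flags.get(cap)
        if flag is None:
            return send_to
        for responder in deploy_network:
            rs = self.rs[responder.id]
            if getattr(rs, flag) and self.__check_in_circle(rs.deploy_range, civ_information.position):
                path = nx.shortest_path(self.G, sender, responder.id)
                send_to.append(path[1])  # next hop toward the responder
        return send_to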
Example #7
    def display_path_to(self, node_id):
        if node_id != self.identity.pubkey:
            for edge in self.edges.values():
                edge.neutralize()

            for node in self.nodes.values():
                node.neutralize()

            path = []
            try:
                path = networkx.shortest_path(self.nx_graph, node_id, self.identity.pubkey)
            except (networkx.exception.NetworkXError, networkx.exception.NetworkXNoPath) as e:
                logging.debug(str(e))
                try:
                    path = networkx.shortest_path(self.nx_graph, self.identity.pubkey, node_id)
                except (networkx.exception.NetworkXError, networkx.exception.NetworkXNoPath) as e:
                    logging.debug(str(e))

            for node, next_node in zip(path[:-1], path[1:]):
                edge = None  # avoid an unbound name when the edge is unknown
                if (node, next_node) in self.edges:
                    edge = self.edges[(node, next_node)]
                elif (next_node, node) in self.edges:
                    edge = self.edges[(next_node, node)]
                if edge:
                    edge.highlight()
                    self.nodes[node].highlight()
                    self.nodes[next_node].highlight()
                    logging.debug("Update edge between {0} and {1}".format(node, next_node))
Example #8
def testSerialRandom():
    """ 50 Random serial test cases
    """

    N = 10
    p = .7
    runs = 0
    while runs < 50:

        # a random graph
        G = nx.fast_gnp_random_graph(N, p)
        try:
            nx.shortest_path(G, source=0, target=N-1)
        except nx.NetworkXNoPath:
            continue
        # convert to plain ndarray
        nw1 = nx2nw(G)

        # copy and join network
        nw2 = serialCopy(nw1)

        # compute effective resistance
        ER1 = ResNetwork(
            nw1, silence_level=3).effective_resistance(0, len(nw1)-1)
        ER2 = ResNetwork(
            nw2, silence_level=3).effective_resistance(0, len(nw2)-1)

        # increment runs
        runs += 1
        # assertion
        print(ER1*2 - ER2)
        assert (ER1*2-ER2) < 1E-6
Example #9
def nca(name1, name2):
	G=json_graph.load(open("static/local_instance.json"))	

	frontier1=[get_id(name1)]
	frontier2=[get_id(name2)]
	
	done=False
	while not done:
		#retrieve nodes in next BFS shell
		shell1=list(chain.from_iterable(G.predecessors(each) for each in frontier1))
		shell2=list(chain.from_iterable(G.predecessors(each) for each in frontier2))

		#no new nodes. End of the line
		if not shell1 and not shell2:
			return []
		
		frontier1+=shell1
		frontier2+=shell2
		intersect=set(frontier1)&set(frontier2)
	
		if intersect:
			done=True
			#print intersect

	return [(nx.shortest_path(G,ancestor,get_id(name1)),nx.shortest_path(G,ancestor,get_id(name2))) for ancestor in list(intersect)]
Example #10
    def getFreePathToTarget(self, bot, current, target):
        ## tmp: get one of the current active paths
        if len(self.botWaypoints) == 0 or (len(self.botWaypoints) == 1 and bot in self.botWaypoints):
            return nx.shortest_path(self.graph, current, target, weight="weight")
        rndKey = random.choice([key for key in self.botWaypoints.keys() if key != bot])
        otherPath = self.botWaypoints[rndKey][0]

        ## Create a dummy graph node and connect it to each nodes of the shortest path.
        u = "startNode"
        self.graph.add_node(u)
        for v in otherPath:
            self.graph.add_edge(u, v, weight=1)

        ## Now calculate the path lengths of all graph nodes to the shortest path nodes.
        distances = nx.single_source_dijkstra_path_length(self.graph, u, weight="weight")
        self.graph.remove_node(u)  # we don't need the dummy graph node any more
        del distances[u]

        ## Create weight heuristics based on path lengths.
        for node_index, length in distances.items():
            self.graph.node[node_index]["weight"] = length
        for u, v in self.graph.edges():
            w = (self.graph.node[u]["weight"] + self.graph.node[v]["weight"]) * 0.5
            self.graph[u][v]["weight"] = 1 / w ** 2

        ## And finally calculate the path to the flanking position.
        return nx.shortest_path(self.graph, current, target, weight="weight")
Example #11
    def _generate_path(self, 
                       frame_from, 
                       frame_to):
        '''
        Generate a path between two frames.
        
        Arguments
        ---------
        frame_from: a frame key, usually a string 
                    example: 'world'
        frame_to:   a frame key, usually a string 
                    example: 'mesh_0'

        Returns
        ----------
        path: (n) list of frame keys
              example: ['mesh_finger', 'mesh_hand', 'world']
        inverted: boolean flag, whether the path is traversing stored
                  matrices forwards or backwards. 
        '''
        try: 
            path = shortest_path(self._transforms, frame_from, frame_to)
            inverted = False
        except NetworkXNoPath:
            path = shortest_path(self._transforms, frame_to, frame_from)
            inverted = True
        self._paths[(frame_from, frame_to)] = (path, inverted)
        return path, inverted
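A small standalone illustration of this bidirectional lookup, with the transform store reduced to a plain DiGraph of frame keys (toy data, not from the source):

import networkx as nx
from networkx import shortest_path
from networkx.exception import NetworkXNoPath

transforms = nx.DiGraph()
transforms.add_edge('world', 'mesh_hand')
transforms.add_edge('mesh_hand', 'mesh_finger')

# Forward direction exists, so no inversion is needed:
print(shortest_path(transforms, 'world', 'mesh_finger'))
# => ['world', 'mesh_hand', 'mesh_finger']

# The reverse lookup raises NetworkXNoPath on a DiGraph, which the
# method above catches to mark the returned path as inverted.
try:
    shortest_path(transforms, 'mesh_finger', 'world')
except NetworkXNoPath:
    print('inverted')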
Example #12
def calculate_network_measures(net, analyser):
    deg=nx.degree_centrality(net)

    if net.is_multigraph():
        net = analyser.flatGraph(net)

    # clustering and path statistics are computed on an undirected view
    if nx.is_directed(net):
        net = net.to_undirected()

    clust = nx.clustering(net)

    # all-pairs shortest paths: dict keyed by source, then by target
    paths = nx.shortest_path(net, source=None, target=None, weight=None)

    # flatten all path lengths, skipping each source's trivial path to itself
    all_lengths = []
    for source, targets in paths.items():
        for target, path in targets.items():
            if target != source:
                all_lengths.append(len(path))
    max_value = max(all_lengths)
    #all_lengths = [x / float(max_value) for x in all_lengths]

    return deg.values(), clust.values(), all_lengths
Example #13
    def _generate_path(self, topo, src_mac, dst_mac, src_port,
                       dst_port, src_dpid, dst_dpid):
        """Generate path method."""
        net = nx.DiGraph(data=topo)
        net.add_node(src_mac)
        net.add_node(dst_mac)
        net.add_edge(int(src_dpid), src_mac, {'port': int(src_port)})
        net.add_edge(src_mac, int(src_dpid))
        net.add_edge(int(dst_dpid), dst_mac, {'port': int(dst_port)})
        net.add_edge(dst_mac, int(dst_dpid))

        target_path = None
        try:
            path = nx.shortest_path(net, src_mac, dst_mac)
            path2 = list(path)  # copy; drop the host endpoints below
            path2.pop()
            path2.pop(0)
            list_load = check_switch_load(path2, data_collection.switch_stat, constant.load_limitation)
            if len(list_load) > 0:
                # print 'lui', list_load
                all_paths = nx.all_simple_paths(net, src_mac, dst_mac)
                path_list = list(all_paths)
                target_path_index, target_path_cost = calculate_least_cost_path(path_list, data_collection.switch_stat, net)
                target_path = path_list[target_path_index]
            else:
                target_path = path
            print('target path', target_path)
        except Exception:
            target_path = None
        return target_path
Example #14
def get_exit_paths(instance):
    start, finish = instance.level.botSpawnAreas[instance.game.enemyTeam.name]
    enemy_base = Vector2(start.x, start.y)
    instance.graph.add_node("enemy_base", position = (start.x, start.y), weight = 0.0)
    instance.graph.node["enemy_base"]["exit_path"] = 0.0
    instance.graph.node["enemy_base"]["camp_target"] = 0.0
    instance.graph.node["enemy_base"]["camp_location"] = 0.0
        
    for i, j in itertools.product(range(int(start.x), int(finish.x)), range(int(start.y), int(finish.y))):
        instance.graph.add_edge("enemy_base", instance.terrain[j][i], weight = 1.0)                       

    our_flag_node = regressions2.get_node_index(instance, instance.game.team.flag.position)
    enemy_score_node = regressions2.get_node_index(instance, instance.game.enemyTeam.flagScoreLocation)
    enemy_flag_node = regressions2.get_node_index(instance, instance.game.enemyTeam.flag.position)
    our_score_node = regressions2.get_node_index(instance, instance.game.team.flagScoreLocation)
    
    b_to_flag = nx.shortest_path(instance.graph, source="enemy_base", target = our_flag_node)
    b_to_def = nx.shortest_path(instance.graph, source="enemy_base", target = enemy_flag_node)
    b_to_def2 = nx.shortest_path(instance.graph, source="enemy_base", target = our_score_node)

    #Calculate how the enemy is exiting from their base.
    exit_paths = [(b_to_flag, 10), (b_to_def, 6), (b_to_def2, 2)]
    for _ in range(50):
        position = instance.level.findRandomFreePositionInBox(instance.level.area)
        base_separation = position - enemy_base
        base_separation = base_separation*15/base_separation.length()
        close_pos = enemy_base + base_separation
        x, y = regressions2.sanitize_position(instance, close_pos)
        close_pos = Vector2(x, y)
        node_index = regressions2.get_node_index(instance, close_pos)
        path = nx.shortest_path(instance.graph, source="enemy_base", target=node_index)
        exit_paths.append((path, 4))
    return exit_paths
Example #15
def randomCommodities(random_graph, numCommodities, commodityDistribution = None):
    '''Generates a list of commodities with reachable source and sink
       and numCommodity groups numbers of commodities with the same starting source
    '''

    nodes = set(random_graph.nodes())
    commodities = []
    commodityDistribution = commodityDistribution or [1] * numCommodities
    assert len(commodityDistribution) <= len(nodes)
    for xCommodities in commodityDistribution:
        done = False
        while not done:
            randomChoice = random.choice(list(nodes))
            # single-source shortest paths: keys are all nodes reachable from randomChoice
            possSinks = list(nx.shortest_path(random_graph, randomChoice))
            possSinks.remove(randomChoice)
            if len(possSinks) < xCommodities:
                pass
            else:
                nodes.remove(randomChoice)
                sinks = random.sample(possSinks, xCommodities)
                
                for sink in sinks:
                    commodities.append(Commodity(randomChoice, sink, random.randint(1,50)))
                done = True
    assert len(commodities) == numCommodities
    for commodity in commodities:
        assert(commodity.sink in nx.shortest_path(random_graph, commodity.source))
    return commodities
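The reachability checks above rely on the single-source form of shortest_path, which returns a dict keyed by every node reachable from the source; a quick illustration:

import networkx as nx

g = nx.path_graph(4)   # 0 - 1 - 2 - 3
g.add_node(9)          # isolated node, unreachable from 0

paths = nx.shortest_path(g, 0)  # single-source: target -> path dict
print(sorted(paths))            # => [0, 1, 2, 3]; 9 is absent
print(paths[3])                 # => [0, 1, 2, 3]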
Example #16
    def path(self, src_ip, dst_ip):
        src_mac = self.get_by_attr('ip', src_ip)
        dst_mac = self.get_by_attr('ip', dst_ip)
        #print "src_mac = %s dst_mac = %s" % (src_mac, dst_mac)
        ofctl_ip = self.get_ip_ofctl()
        if src_mac is None:
            if ofctl_ip == src_ip:
                src_mac = self.get_hw_ofctl()
            else:
                return []
        if dst_mac is None:
            if ofctl_ip == dst_ip:
                dst_mac = self.get_hw_ofctl()
            else:
                dst_mac = self.get_my_crossdomain_sw(dst_ip)
                if dst_mac is not None:
                    try:
                        domain_path = shortest_path(self, src_mac, dst_mac, 'weight')
                    except Exception as e:
                        #log.debug("ERRO ENCONTRAR O PATH - %s", e)
                        return []

                    if len(domain_path) != 0:
                        dst_sw = self.get_dst_crossdomain_sw(dst_ip)
                        domain_path.append(dst_sw)
                    return domain_path
                else:
                    return []
        try:
            domain_path = shortest_path(self, src_mac, dst_mac, 'weight')
        except Exception as e:
            #log.debug("ERRO SYNC ADD EDGE - %s", e)
            return []
        return domain_path
Example #17
def main(fName="cppzk.txt"):
    g = nx.Graph()
    for eachLine in open(fName):
        fields = eachLine.split()
        g.add_edge(fields[0], fields[1])
         
#     keyConns = [["ASPA0085", "ARGA0082"], ["ARGA0082", "GLUA0194"]]
    keyConns = [["ASPA0085", "GLUA0194"]]
#     keyConns = [["ASPA0085", "ARGA0082"], ["ARGA0082", "GLUA0194"], ["ASPA0085", "GLUA0194"]]

    keyAtoms = {"ASPA0085":["OD1", "OD2"], "ARGA0082":["NE", "NH1", "NH2"], "GLUA0194":["OE1", "OE2"]}
    
    for eachConn in keyConns:
        sourceRes = eachConn[0]
        targetRes = eachConn[1]
        
        for eachSourceAtom in keyAtoms[sourceRes]:
            sourceAtom = sourceRes + eachSourceAtom
            if sourceAtom not in g.nodes(): continue
            for eachTargetAtom in keyAtoms[targetRes]:
                targetAtom = targetRes + eachTargetAtom
                if targetAtom not in g.nodes(): continue
                
                if nx.has_path(g, sourceAtom, targetAtom):
                    print "Path between %13s%13s" % (sourceAtom, targetAtom),
                    print nx.shortest_path(g, sourceAtom, targetAtom)
Example #18
def switches_to_remove(version, graph):
    if version == 0:
        return []
    elif len(graph.switches()) < 2:
        return []
    else:
        scratch_graph = graph.copy()
        nodes = []
        hosts = scratch_graph.hosts()[::version * 2]
        for host1 in hosts:
            for host2 in hosts:
                if host1 == host2:
                    continue
                try:
                    path = nx.shortest_path(scratch_graph, host1, host2)
                except nx.exception.NetworkXNoPath:
                    continue
                # Pick a "middle" node
                node = path[len(path)/2]
                tmp_graph = scratch_graph.copy()
                tmp_graph.remove_node(node)
                try:
                    nx.shortest_path(tmp_graph, host1, host2)
                except nx.exception.NetworkXNoPath:
                    continue
                # If path still exists, toss node onto list
                scratch_graph.remove_node(node)
                nodes.append(node)
                
        return nodes
Example #19
 def get_shortest_path(self, source, destination):
   G = nx.DiGraph()
   self.update()
   G.add_weighted_edges_from([(link.lastHopIP, link.destinationIP, link.tcEdgeCost) for link in self.linklist])
   if self.is_in_topology(source) and self.is_in_topology(destination):
     return nx.shortest_path(G, source, destination)
   elif self.is_in_topology(source):
     # find the closest gateway
     closestgw = None
     cost = None
     for gw in self.gatewaylist:
       try:
         splen = nx.shortest_path_length(G, source, gw)
       except nx.NetworkXNoPath:
         # skip unreachable gateways instead of giving up entirely
         continue
       if cost is None or splen < cost:
         cost = splen
         closestgw = gw
     if closestgw:
       return nx.shortest_path(G, source, closestgw)
     else:
       return None
   elif self.is_in_topology(destination):
     # should this happen?
     res = self.get_shortest_path(destination, source)
     if res:
       res.reverse()
     return res
Example #20
    def get_influence(self, nodea, nodeb):
        ''' Returns a positive number if "nodea is more dominant than nodeb"; 
            Returns a negative number if "nodeb is more dominant than nodea";
            Returns None if no influence comparison could be performed '''

        try:
            ab = nx.shortest_path(self.G, nodea, nodeb)
        except nx.NetworkXError:
            print "Could not find one of the nodes in the graph"
            return 0.0
        except nx.NetworkXNoPath:
            ab = None 

        try:
            ba = nx.shortest_path(self.G, nodeb, nodea)
        except nx.NetworkXError:
            print "Could not find one of the nodes in the graph"
            return 0.0
        except nx.NetworkXNoPath:
            ba = None 

        if ab is not None and ba is not None:
            if len(ab) > len(ba):
                ab = None
            else:
                ba = None

        if ab is not None:
            return 1./len(ab)
        elif ba is not None:
            return -1./len(ba)
        else:
            return 0.0
Example #21
def connectTrees(milestone, milestoneTree, notMilestoneTree, rho):
    """Attempts to connect the most recently added-to tree to 
    the other tree.  Tries two configurations, tests for distance, and 
    then connects."""
    newMilestone = notMilestoneTree.findCloseNewMilestone(milestone)
    if newMilestone is None:
        newMilestone = notMilestoneTree.getUniformSample()
    pathSuccess = None
    #try twice, once nearby, once random
    for i in range(2):
        distance = l2Distance(milestone, newMilestone)
        if distance < rho:
            milestoneTree.addEdge(milestone, newMilestone, weight=distance)

            #get the path from the root to the other root to test
            node = newMilestone
            firstHalfPath = nx.shortest_path(milestoneTree.graph, source=milestoneTree.root, target=node)
            #print firstHalfPath
            secondHalfPath = nx.shortest_path(notMilestoneTree.graph, source=notMilestoneTree.root, target=node)
            pathSuccess = testPath(milestoneTree, firstHalfPath, notMilestoneTree, secondHalfPath)

            #check if the path is clear.  if it is, it will be not None,
            #otherwise it will be None
            if pathSuccess is not None:
                return firstHalfPath, secondHalfPath
        if pathSuccess is None and i == 1:
            # give up only after the second (random) attempt fails
            return None
        newMilestone = notMilestoneTree.getUniformSample()
Example #22
	def get_score(self, player_goal, opponent_goal):
		if not self.is_terminal(player_goal, opponent_goal):
			player_path = nx.shortest_path(self.G, self.player_location, player_goal, weight="weight")
			self.player_score -= self.score_for_path(player_path, self.G) + len(player_path)
			opponent_path = nx.shortest_path(self.G, self.opponent_location, opponent_goal, weight="weight")
			self.opponent_score -= self.score_for_path(opponent_path, self.G) + len(opponent_path)
		return self.player_score+self.opponent_score+1000
Example #23
File: graphs.py Project: arjunc12/Ants
def road(road_file_path, comments='#'):
    G = nx.read_edgelist(road_file_path, comments=comments, nodetype=int)
    nodes = []
    start_node = random.choice(list(G.nodes()))
    queue = [start_node]
    added_nodes = 1
    seen = set()
    while added_nodes < MAX_ROAD_NODES and len(queue) > 0:
        curr = queue.pop()
        if curr in seen:
            continue
        else:
            nodes.append(curr)
            queue += G.neighbors(curr)
            seen.add(curr)
            added_nodes += 1
    
    G = G.subgraph(nodes).copy()  # copy so edges can be removed below
 
    mapping = {}
    for i, node in enumerate(G.nodes()):
        x = i // 12
        y = i % 12
        mapping[node] = (x, y)
    #nx.relabel_nodes(G, mapping, copy=False)
    
    mapping2 = {}
    for i, node in enumerate(sorted(G.nodes())):
        mapping2[node] = i
    #nx.relabel_nodes(G, mapping2, copy=False)
    
    G.graph['name'] = 'road'
    
    pos = nx.kamada_kawai_layout(G, scale = graphscale)
    for u in G.nodes():
        G.node[u]['pos'] = pos[u]
    
    done = False
    for i in range(MAX_ROAD_ATTEMPTS):
        n1, n2 = sample(list(G.nodes()), 2)
        if not nx.has_path(G, n1, n2):
            continue
        sp = nx.shortest_path(G, n1, n2)
        if len(sp) < 8 or len(sp) > 30:
            continue
        index = random.choice(range(len(sp) // 4, 3 * len(sp) // 4))
        u, v = sp[index], sp[index + 1]
        G.remove_edge(u, v)
        if not nx.has_path(G, u, v):
            G.add_edge(u, v)
            continue
        fp = nx.shortest_path(G, u, v)
        if len(fp) > 8:
            G.add_edge(u, v)
            continue
        #print n1, n2, u, v, sp, fp
        G.add_edge(u, v)
        set_init_road_path(G, n1, n2, u, v)
        return G
Example #24
    def printroute(self):
        for src in self.nodes:
            print("[Neighbors List] neighbor = %s" % src)
            print(list(nx.neighbors(self.graph, src)))
            print("[Shortest Paths] - %s " % src)
            for dst in self.nodes:
                if src != dst:
                    print(nx.shortest_path(self.graph, src, dst))
Example #25
File: derive.py Project: xrotwang/lingpy
def _find_dir_path(graph, start, end):
    """
    Function finds the path connecting two nodes in a directed graph under the
    condition that the two nodes are connected either directly or by a common
    ancestor node.
    """

    # first possibility: there is a direct path between the two nodes
    # if nx.shortest_path(graph,start,end) != False:
    try:
        check = nx.shortest_path(graph, start, end)
    except (nx.NetworkXNoPath, nx.NodeNotFound):
        check = False

    if check == False:

        # return nx.shortest_path(graph,start,end)
        # else:
        # except:
        # second possibility: there is a direct path between the two nodes, but
        # it starts from the other node
        # if nx.shortest_path(graph,end,start) != False:
        try:
            check = nx.shortest_path(graph, end, start)
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            check = False
            # return nx.shortest_path(graph,end,start)
        # third possibility: there is no direct path between the nodes in
        # neither direction, but there is a path in an undirected graph
        if check == False:
            if _fop(graph.to_undirected(), start, end) != []:
                # here, we simply check, whether with in all paths connecting the
                # two nodes there is a node which directly connects to both nodes
                # (i.e. which is the ancestor of both nodes). If this is the case,
                # the respective shortest path is what we are looking for.
                paths = _fop(graph.to_undirected(), start, end)
                current_path_length = max([len(path) for path in paths])
                shortest_paths = nx.shortest_path(graph)
                current_path = []
                for path in paths:
                    for node in path[1:-1]:
                        if start in shortest_paths[node] \
                                and end in shortest_paths[node]:
                            if len(path) <= current_path_length:
                                current_path_length = len(path)
                                current_path = path
                                break
                if current_path != []:
                    return current_path
                else:
                    return False
            # fourth condition: there is no path connecting the nodes at all
            else:
                return False
        else:
            return check
    else:
        return check
Example #26
def two_edge_swap(G):
    """
    Implements Lu and Ravi Edge Two Swap Algorithm
    Input: original graph G
    Output: tree T after applying two-edge swaps

    Note: Work in progress
    """

    T = degBasedMST(G)

    M = count_iterable(it.combinations(list(set(G.edges()).difference(set(T.edges()))), 2))
    print(M)

    i = 1
    for e1, e2 in it.combinations(list(set(G.edges()).difference(set(T.edges()))), 2):
        i += 1

        U = T.copy()
        Degrees = list(T.degree(T.nodes()).values())

        try:
            path1 = nx.shortest_path(T, e1[0], e1[1])

            for f1 in zip(path1, path1[1:]):

                U.add_edge(e1[0], e1[1])
                U.remove_edge(f1[0], f1[1])

                try:
                    path2 = nx.shortest_path(T, e2[0], e2[1])

                    for f2 in zip(path2, path2[1:]):

                        U.add_edge(e2[0], e2[1])

                        if (f2[0], f2[1]) in U.edges():
                            U.remove_edge(f2[0], f2[1])

                        newDegrees = list(U.degree(U.nodes()).values())

                        if newDegrees.count(1) > Degrees.count(1):
                            print(newDegrees.count(1))
                            print(i)
                            T = U.copy()
                            Degrees = list(T.degree(T.nodes()).values())

                        U.add_edge(f2[0], f2[1])
                except nx.NetworkXNoPath:
                    pass

                U.add_edge(f1[0], f1[1])

        except nx.NetworkXNoPath:
            pass

    return T
Example #27
def find_dependencies(G, source_filter_function, target_filter_function=None):
    # materialize the filters so the node lists can be iterated repeatedly
    source_nodes = list(filter(source_filter_function, G.nodes()))
    target_nodes = list(filter(target_filter_function, G.nodes())) \
            if target_filter_function is not None else source_nodes

    for source, target in itertools.product(source_nodes, target_nodes):
        if source != target and nx.has_path(G, source, target):
            print('\n%s -> %s' % (source, target))
            print(nx.shortest_path(G, source=source, target=target))
Example #28
File: sneak.py Project: newmanne/CTF-
    def initialize(self):
        self.makeGraph()
        
        self.graph.add_node("enemy_base")
        self.positions["enemy_base"] = None
        start, finish = self.level.botSpawnAreas[self.game.enemyTeam.name]
        for i, j in itertools.product(range(int(start.x), int(finish.x)), range(int(start.y), int(finish.y))):
            self.graph.add_edge("enemy_base", self.terrain[j][i], weight = 1.0)

        self.graph.add_node("base")
        self.positions["base"] = None
        start, finish = self.level.botSpawnAreas[self.game.team.name]
        for i, j in itertools.product(range(int(start.x), int(finish.x)), range(int(start.y), int(finish.y))):
            self.graph.add_edge("base", self.terrain[j][i], weight = 1.0)

        self.node_EnemyFlagIndex = self.getNodeIndex(self.game.team.flag.position)
        self.node_EnemyScoreIndex = self.getNodeIndex(self.game.enemyTeam.flagScoreLocation)

        # self.node_Bases = self.graph.add_vertex()
        # e = self.graph.add_edge(self.node_Bases, self.node_MyBase)
        # e = self.graph.add_edge(self.node_Bases, self.node_EnemyBase)

        vb2f = nx.shortest_path(self.graph, source="enemy_base", target=self.node_EnemyFlagIndex)
        vf2s = nx.shortest_path(self.graph, source=self.node_EnemyFlagIndex, target=self.node_EnemyScoreIndex)
        #vb2s = nx.shortest_path(self.graph, source="enemy_base", target=self.node_EnemyScoreIndex)

        self.node_EnemyBaseToFlagIndex = "enemy_base_to_flag"
        self.graph.add_node(self.node_EnemyBaseToFlagIndex)
        self.positions["enemy_base_to_flag"] = None
        for vertex in vb2f:
            self.graph.add_edge(self.node_EnemyBaseToFlagIndex, vertex, weight = 1.0)
        
        self.node_EnemyFlagToScoreIndex = "enemy_flag_to_score" 
        self.graph.add_node(self.node_EnemyFlagToScoreIndex)
        self.positions["enemy_flag_to_score"] = None
        for vertex in vf2s:
            self.graph.add_edge(self.node_EnemyFlagToScoreIndex, vertex, weight = 1.0)
        
        self.node_EnemyBaseToScoreIndex = "enemy_base_to_score"
        self.graph.add_node(self.node_EnemyBaseToScoreIndex)
        self.positions["enemy_base_to_score"] = None
       # for vertex in vb2s:
       #     self.graph.add_edge(self.node_EnemyBaseToScoreIndex, vertex, weight = 1.0)

        ## node = self.makeNode(self.game.enemyTeam.flag.position)
        self.distances = nx.single_source_shortest_path_length(self.graph, self.node_EnemyFlagToScoreIndex)

        self.graph.remove_node("base")
        self.graph.remove_node("enemy_base")
        self.graph.remove_node(self.node_EnemyBaseToFlagIndex)
        self.graph.remove_node(self.node_EnemyFlagToScoreIndex)
        self.graph.remove_node(self.node_EnemyBaseToScoreIndex)

        self.updateEdgeWeights()

        self.paths = {b: None for b in self.game.team.members}
Example #29
File: graph.py Project: newmanne/CTF-
def setupGraphs(commander):
    makeGraph(commander)
    
    commander.graph.add_node("enemy_base")
    commander.positions["enemy_base"] = None
    start, finish = commander.level.botSpawnAreas[commander.game.enemyTeam.name]
    for i, j in itertools.product(range(int(start.x), int(finish.x)), range(int(start.y), int(finish.y))):
        commander.graph.add_edge("enemy_base", commander.terrain[j][i], weight=1.0)

    commander.graph.add_node("base")
    commander.positions["base"] = None
    start, finish = commander.level.botSpawnAreas[commander.game.team.name]
    for i, j in itertools.product(range(int(start.x), int(finish.x)), range(int(start.y), int(finish.y))):
        commander.graph.add_edge("base", commander.terrain[j][i], weight=1.0)

    node_EnemyFlagIndex = getNodeIndex(commander, commander.game.team.flag.position)
    node_EnemyScoreIndex = getNodeIndex(commander, commander.game.enemyTeam.flagScoreLocation)

    # self.node_Bases = commander.graph.add_vertex()
    # e = commander.graph.add_edge(self.node_Bases, self.node_MyBase)
    # e = commander.graph.add_edge(self.node_Bases, self.node_EnemyBase)

    vb2f = nx.shortest_path(commander.graph, source="enemy_base", target=node_EnemyFlagIndex)
    vf2s = nx.shortest_path(commander.graph, source=node_EnemyFlagIndex, target=node_EnemyScoreIndex)
    #vb2s = nx.shortest_path(commander.graph, source="enemy_base", target=self.node_EnemyScoreIndex)

    node_EnemyBaseToFlagIndex = "enemy_base_to_flag"
    commander.graph.add_node(node_EnemyBaseToFlagIndex)
    commander.positions["enemy_base_to_flag"] = None
    for vertex in vb2f:
        commander.graph.add_edge(node_EnemyBaseToFlagIndex, vertex, weight=1.0)
    
    node_EnemyFlagToScoreIndex = "enemy_flag_to_score" 
    commander.graph.add_node(node_EnemyFlagToScoreIndex)
    commander.positions["enemy_flag_to_score"] = None
    for vertex in vf2s:
        commander.graph.add_edge(node_EnemyFlagToScoreIndex, vertex, weight=1.0)
    
    node_EnemyBaseToScoreIndex = "enemy_base_to_score"
    commander.graph.add_node(node_EnemyBaseToScoreIndex)
    commander.positions["enemy_base_to_score"] = None
   # for vertex in vb2s:
   #     commander.graph.add_edge(self.node_EnemyBaseToScoreIndex, vertex, weight = 1.0)

    ## node = self.makeNode(commander.game.enemyTeam.flag.position)
    distances = nx.single_source_shortest_path_length(commander.graph, node_EnemyFlagToScoreIndex)

    commander.graph.remove_node("base")
    commander.graph.remove_node("enemy_base")
    commander.graph.remove_node(node_EnemyBaseToFlagIndex)
    commander.graph.remove_node(node_EnemyFlagToScoreIndex)
    commander.graph.remove_node(node_EnemyBaseToScoreIndex)

    updateEdgeWeights(commander, distances)

    commander.originalGraph = commander.graph
Example #30
    def least_common_subsumer(self, node1, node2):
        path1 = nx.shortest_path(self._taxonomy, node1, self._root)
        path2 = nx.shortest_path(self._taxonomy, node2, self._root)
        i = 1
        lcs = self._root
        while i <= len(path1) and i <= len(path2):
            if path1[-i] == path2[-i]:
                lcs = path1[-i]
            i = i + 1
        return lcs
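A toy run of the same walk-back-from-the-root logic, with the taxonomy as a child-to-root DiGraph (illustrative data only):

import networkx as nx

taxonomy = nx.DiGraph()
# child -> parent edges, all leading up to the root 'animal'
taxonomy.add_edges_from([('cat', 'mammal'), ('dog', 'mammal'),
                         ('mammal', 'animal'), ('bird', 'animal')])

path1 = nx.shortest_path(taxonomy, 'cat', 'animal')  # ['cat', 'mammal', 'animal']
path2 = nx.shortest_path(taxonomy, 'dog', 'animal')  # ['dog', 'mammal', 'animal']

# Compare the paths from the root end; the last node they share is the LCS.
i, lcs = 1, 'animal'
while i <= len(path1) and i <= len(path2):
    if path1[-i] == path2[-i]:
        lcs = path1[-i]
    i += 1
print(lcs)  # => mammal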
Example #31
import networkx as nx
from matplotlib import pyplot as plt

graph = nx.DiGraph()
graph.add_edges_from([("root", "a"), ("a", "b"), ("a", "e"), ("b", "c"), ("b", "d"), ("d", "e")])

graph.nodes() # => NodeView(('root', 'a', 'b', 'e', 'c', 'd'))

nx.shortest_path(graph, 'root', 'e') # => ['root', 'a', 'e']

nx.dag_longest_path(graph) # => ['root', 'a', 'b', 'd', 'e']

list(nx.topological_sort(graph)) # => ['root', 'a', 'b', 'd', 'e', 'c']

nx.is_directed(graph) # => True

nx.is_directed_acyclic_graph(graph) # => True
Example #32
    def path_to(self, target):
        try:
            return shortest_path(self.army.board, self.pos, target)
        except Exception:
            # no path (or node missing from the board): treat as unreachable
            return None
Example #33
def Shortest_Path():
    # #--------------------------------------DELETING/Getting rid of any previous graph-----------------------------------------
    global Graph, visuals, button, toolbar, G, fixed_nodes, fixed_positions, pos, graph
    startnode = Entry1_7.get()
    endnode = Entry1_8.get()
    #This will deal with empty Entry inputs
    if len(startnode) == 0:
        startnode = int(list(G.nodes())[-1])
    #This will deal with empty Entry inputs
    if len(endnode) == 0:
        endnode = int(list(G.nodes())[-1])
    # all-pairs shortest paths; p[start][end] picks out the one we need
    p = nx.shortest_path(G)
    print(p)
    path = list(p[int(startnode)][int(endnode)])
    print("Path = " + str(path))
    x = []
    txt = ""
    nx.draw(G,
            pos,
            fixed=None,
            with_labels=True,
            node_size=800,
            node_color='skyblue',
            node_shape="s",
            alpha=0.5,
            linewidths=10,
            font_size=8,
            font_weight='bold')
    labels = nx.get_edge_attributes(G, 'weight')
    j = 0
    for i in path:
        j = j + 1
        x.append(i)
        plt.title('Iteration {}'.format(j))
        nx.draw_networkx_nodes(G,
                               pos,
                               nodelist=x,
                               node_size=400,
                               node_color='yellow',
                               node_shape="s",
                               alpha=0.5,
                               linewidths=10,
                               font_size=8,
                               font_weight='bold')
        # visuals.after(1000)
        print("Getting Inside")
        if str(i) == str(endnode):
            nx.draw_networkx_nodes(G,
                                   pos,
                                   nodelist=[i],
                                   node_size=400,
                                   node_color='red',
                                   node_shape="s",
                                   alpha=0.5,
                                   linewidths=10,
                                   font_size=8,
                                   font_weight='bold')
            break
        plt.pause(1)
        # plt.clf()
        # plt.show()
    txt = str(x)
    sp = plt.text(0.5,
                  -0.1,
                  "Shortest Path: " + txt,
                  size=12,
                  ha="center",
                  transform=ax.transAxes)  #Shows the Caption below the graph
Example #34
def process_sentence_spacy(base_dir,
                           sentence,
                           sentence_entities,
                           sentence_pairs,
                           positive_entities,
                           wordnet_tags=None,
                           mask_entities=True,
                           min_sdp_len=0,
                           max_sdp_len=15):
    """Process sentence to obtain labels, instances and classes for a ML classifier

    :param base_dir:
    :param sentence: sentence processed by spacy
    :param sentence_entities: dictionary mapping entity ID to ((e_start, e_end), text, paths_to_root)
    :param sentence_pairs: dictionary mapping pairs of known entities in this sentence to pair types
    :param positive_entities:
    :param wordnet_tags:
    :param mask_entities:
    :param min_sdp_len:
    :param max_sdp_len:
    :return: labels of each pair (according to sentence_entities, word vectors and classes (pair types according to sentence_pairs)
    """

    left_word_vectors = []
    right_word_vectors = []
    left_wordnets = []
    right_wordnets = []
    classes = []
    labels = []

    graph, nodes_list = get_network_graph_spacy(sentence)
    sentence_head_tokens_type_1, sentence_head_tokens_type_2, pos_gv, neg_gv = get_head_tokens_spacy(
        base_dir, sentence_entities, sentence, positive_entities)

    entity_offsets = [sentence_entities[x][0][0] for x in sentence_entities]

    for (e1, e2) in product(sentence_head_tokens_type_1,
                            sentence_head_tokens_type_2):

        if sentence_head_tokens_type_1.get(e1):
            if int(sentence_head_tokens_type_1[e1].split('e')[-1]) > int(
                    sentence_head_tokens_type_2[e2].split('e')[-1]):
                e1, e2 = e2, e1
        else:
            if int(sentence_head_tokens_type_1[e2].split('e')[-1]) > int(
                    sentence_head_tokens_type_2[e1].split('e')[-1]):
                e2, e1 = e1, e2

        if sentence_head_tokens_type_1.get(e1):
            e1_text = sentence_entities[sentence_head_tokens_type_1[e1]]
            e2_text = sentence_entities[sentence_head_tokens_type_2[e2]]

        else:
            e2_text = sentence_entities[sentence_head_tokens_type_1[e2]]
            e1_text = sentence_entities[sentence_head_tokens_type_2[e1]]

        if 'train' in base_dir:
            middle_text = sentence.text[e1_text[0][-1]:e2_text[0][0]]

            if middle_text.strip() in string.punctuation:
                continue

        try:
            sdp = nx.shortest_path(graph, source=e1, target=e2)

            if len(sdp) < min_sdp_len or len(sdp) > max_sdp_len:
                continue

            neg = False
            is_neg_gv = False
            for i, element in enumerate(sdp):
                token_text = element.split('-')[0]
                if (i == 1 or i == len(sdp) - 2) and token_text in neg_gv_list:
                    logging.info('Skipped gv {} {}:'.format(
                        token_text, str(sdp)))
                    is_neg_gv = True

            if neg or is_neg_gv:
                continue

            vector = []
            wordnet_vector = []
            negations = 0
            head_token_position = None

            for i, element in enumerate(sdp):
                if element != 'ROOT':
                    token_idx = int(
                        element.split('-')[-1])  # get the index of the token
                    sdp_token = sentence[token_idx]  # get the token object

                    if mask_entities and sdp_token.idx in entity_offsets:
                        vector.append('entity')
                    else:
                        vector.append(sdp_token.text)
                    if wordnet_tags:
                        wordnet_vector.append(wordnet_tags[token_idx])

                    head_token = '{}-{}'.format(
                        sdp_token.head.lower_,
                        sdp_token.head.i)  # get the key of head token

                    # Head token must not have its head in the path, otherwise that would be the head token
                    # In some cases the token is its own head
                    if head_token not in sdp or head_token == element:
                        head_token_position = i + negations

            if head_token_position is None:
                print('Head token not found:', e1_text, e2_text, sdp)
                sys.exit()
            else:
                left_vector = vector[:head_token_position + 1]
                right_vector = vector[head_token_position:]
                left_wordnet = wordnet_vector[:head_token_position + 1]
                right_wordnet = wordnet_vector[head_token_position:]

            left_word_vectors.append(left_vector)
            right_word_vectors.append(right_vector)
            left_wordnets.append(left_wordnet)
            right_wordnets.append(right_wordnet)

        except nx.exception.NetworkXNoPath:
            logging.warning('No path: %s %s %s', e1_text, e2_text, graph.nodes())
            left_word_vectors.append([])
            right_word_vectors.append([])
            left_wordnets.append([])
            right_wordnets.append([])

        except nx.NodeNotFound:
            logging.warning(('Node not found:', e1_text, e2_text, e1, e2,
                             list(sentence), graph.nodes()))
            left_word_vectors.append([])
            right_word_vectors.append([])
            left_wordnets.append([])
            right_wordnets.append([])

        if sentence_head_tokens_type_1.get(e1):
            labels.append((sentence_head_tokens_type_1[e1],
                           sentence_head_tokens_type_2[e2]))
            if (sentence_head_tokens_type_1[e1],
                    sentence_head_tokens_type_2[e2]) in sentence_pairs:
                classes.append(
                    sentence_pairs[(sentence_head_tokens_type_1[e1],
                                    sentence_head_tokens_type_2[e2])])
            else:
                classes.append(0)
        else:
            labels.append((sentence_head_tokens_type_1[e2],
                           sentence_head_tokens_type_2[e1]))
            if (sentence_head_tokens_type_1[e2],
                    sentence_head_tokens_type_2[e1]) in sentence_pairs:
                classes.append(
                    sentence_pairs[(sentence_head_tokens_type_1[e2],
                                    sentence_head_tokens_type_2[e1])])
            else:
                classes.append(0)

    return labels, (left_word_vectors, right_word_vectors), (
        left_wordnets, right_wordnets), classes, pos_gv, neg_gv
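The heart of the function above is the shortest dependency path (SDP) between the two entity head tokens; a minimal standalone sketch of just that step, with a hand-built token graph standing in for get_network_graph_spacy (hypothetical token keys):

import networkx as nx

# Undirected graph over "text-index" token keys, as the dependency
# graph above is assumed to be keyed.
graph = nx.Graph()
graph.add_edges_from([('aspirin-0', 'inhibits-1'),
                      ('inhibits-1', 'cyclooxygenase-2'),
                      ('cyclooxygenase-2', 'the-3')])

sdp = nx.shortest_path(graph, source='aspirin-0', target='cyclooxygenase-2')
print(sdp)  # => ['aspirin-0', 'inhibits-1', 'cyclooxygenase-2']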
Example #35
def validate_consistent_re(N=500, delta_true=.15, sigma_true=[.1,.1,.1,.1,.1], 
                           true=dict(i=quadratic, f=constant, r=constant)):
    types = pl.array(['i', 'r', 'f', 'p'])

    ## generate simulated data
    model = data_simulation.simple_model(N)
    model.input_data['effective_sample_size'] = 1.
    model.input_data['value'] = 0.
    # coarse knot spacing for fast testing
    for t in types:
        model.parameters[t]['parameter_age_mesh'] = range(0, 101, 20)

    sim = consistent_model.consistent_model(model, 'all', 'total', 'all', {})
    for t in 'irf':
        for i, k_i in enumerate(sim[t]['knots']):
            sim[t]['gamma'][i].value = pl.log(true[t](k_i))

    age_start = pl.array(mc.runiform(0, 100, size=N), dtype=int)
    age_end = pl.array(mc.runiform(age_start, 100, size=N), dtype=int)

    data_type = types[mc.rcategorical(pl.ones(len(types), dtype=float) / float(len(types)), size=N)]


    a = pl.arange(101)
    age_weights = pl.ones_like(a)
    sum_wt = pl.cumsum(age_weights)

    p = pl.zeros(N)
    for t in types:
        mu_t = sim[t]['mu_age'].value
        sum_mu_wt = pl.cumsum(mu_t*age_weights)
    
        p_t = (sum_mu_wt[age_end] - sum_mu_wt[age_start]) / (sum_wt[age_end] - sum_wt[age_start])

        # correct cases where age_start == age_end
        i = age_start == age_end
        if pl.any(i):
            p_t[i] = mu_t[age_start[i]]

        # copy part into p
        p[data_type==t] = p_t[data_type==t]


    # add covariate shifts
    import dismod3
    import simplejson as json
    gbd_model = data.ModelData.from_gbd_jsons(json.loads(dismod3.disease_json.DiseaseJson().to_json()))
    model.hierarchy = gbd_model.hierarchy

    from validate_covariates import alpha_true_sim
    area_list = pl.array(['all', 'super-region_3', 'north_africa_middle_east', 'EGY', 'KWT', 'IRN', 'IRQ', 'JOR', 'SYR'])
    alpha = {}
    for t in types:
        alpha[t] = alpha_true_sim(model, area_list, sigma_true)
    print(json.dumps(alpha, indent=2))

    model.input_data['area'] = area_list[mc.rcategorical(pl.ones(len(area_list)) / float(len(area_list)), N)]
    
    for i, a in model.input_data['area'].items():
        t = data_type[i]
        p[i] = p[i] * pl.exp(pl.sum([alpha[t][n] for n in nx.shortest_path(model.hierarchy, 'all', a) if n in alpha]))

    n = mc.runiform(100, 10000, size=N)

    model.input_data['data_type'] = data_type
    model.input_data['age_start'] = age_start
    model.input_data['age_end'] = age_end
    model.input_data['effective_sample_size'] = n
    model.input_data['true'] = p
    model.input_data['value'] = mc.rnegative_binomial(n*p, delta_true) / n

    # coarse knot spacing for fast testing
    for t in types:
        model.parameters[t]['parameter_age_mesh'] = range(0, 101, 20)

    ## Then fit the model and compare the estimates to the truth
    model.vars = {}
    model.vars = consistent_model.consistent_model(model, 'all', 'total', 'all', {})
    #model.map, model.mcmc = fit_model.fit_consistent_model(model.vars, iter=101, burn=0, thin=1, tune_interval=100)
    model.map, model.mcmc = fit_model.fit_consistent_model(model.vars, iter=10000, burn=5000, thin=25, tune_interval=100)

    graphics.plot_convergence_diag(model.vars)

    graphics.plot_fit(model, model.vars, {}, {})
    for i, t in enumerate('i r f p rr pf'.split()):
        pl.subplot(2, 3, i+1)
        pl.plot(range(101), sim[t]['mu_age'].value, 'w-', label='Truth', linewidth=2)
        pl.plot(range(101), sim[t]['mu_age'].value, 'r-', label='Truth', linewidth=1)

    pl.show()

    model.input_data['mu_pred'] = 0.
    model.input_data['sigma_pred'] = 0.
    for t in types:
        model.input_data['mu_pred'][data_type==t] = model.vars[t]['p_pred'].stats()['mean']
        model.input_data['sigma_pred'][data_type==t] = model.vars[t]['p_pred'].stats()['standard deviation']
    data_simulation.add_quality_metrics(model.input_data)

    model.delta = pandas.DataFrame(dict(true=[delta_true for t in types if t != 'rr']))
    model.delta['mu_pred'] = [pl.exp(model.vars[t]['eta'].trace()).mean() for t in types if t != 'rr']
    model.delta['sigma_pred'] = [pl.exp(model.vars[t]['eta'].trace()).std() for t in types if t != 'rr']
    data_simulation.add_quality_metrics(model.delta)

    model.alpha = pandas.DataFrame()
    model.sigma = pandas.DataFrame()
    for t in types:
        alpha_t = pandas.DataFrame(index=[n for n in nx.traversal.dfs_preorder_nodes(model.hierarchy)])
        alpha_t['true'] = pandas.Series(dict(alpha[t]))
        alpha_t['mu_pred'] = pandas.Series([n.stats()['mean'] for n in model.vars[t]['alpha']], index=model.vars[t]['U'].columns)
        alpha_t['sigma_pred'] = pandas.Series([n.stats()['standard deviation'] for n in model.vars[t]['alpha']], index=model.vars[t]['U'].columns)
        alpha_t['type'] = t
        model.alpha = model.alpha.append(alpha_t.dropna(), ignore_index=True)

        sigma_t = pandas.DataFrame(dict(true=sigma_true))
        sigma_t['mu_pred'] = [n.stats()['mean'] for n in model.vars[t]['sigma_alpha']]
        sigma_t['sigma_pred'] = [n.stats()['standard deviation'] for n in model.vars[t]['sigma_alpha']]
        model.sigma = model.sigma.append(sigma_t.dropna(), ignore_index=True)

    data_simulation.add_quality_metrics(model.alpha)
    data_simulation.add_quality_metrics(model.sigma)


    print('delta')
    print(model.delta)

    print('\ndata prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (
        model.input_data['abs_err'].mean(),
        pl.median(pl.absolute(model.input_data['rel_err'].dropna())),
        model.input_data['covered?'].mean()))

    model.mu = pandas.DataFrame()
    for t in types:
        model.mu = model.mu.append(pandas.DataFrame(dict(true=sim[t]['mu_age'].value,
                                                         mu_pred=model.vars[t]['mu_age'].stats()['mean'],
                                                         sigma_pred=model.vars[t]['mu_age'].stats()['standard deviation'])),
                                   ignore_index=True)
    data_simulation.add_quality_metrics(model.mu)
    print('\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (
        model.mu['abs_err'].mean(),
        pl.median(pl.absolute(model.mu['rel_err'].dropna())),
        model.mu['covered?'].mean()))
    print()


    data_simulation.initialize_results(model)
    data_simulation.add_to_results(model, 'delta')
    data_simulation.add_to_results(model, 'mu')
    data_simulation.add_to_results(model, 'input_data')
    data_simulation.add_to_results(model, 'alpha')
    data_simulation.add_to_results(model, 'sigma')
    data_simulation.finalize_results(model)

    print(model.results)

    return model
Example #36
    def _neighbors(self, node, levels=1, graph=None):
        """Return graph of neighbors around node in graph (default: self.dataG)
        to a certain number of levels"""

        if graph is None:
            graph = self.dataG

        if not isinstance(node, (list, tuple, set)):
            node = [
                node,
            ]

        neighbors = set(node)
        blocks = [[
            n,
        ] for n in node]
        for i in range(levels):
            for n in neighbors:
                new_neighbors = set(graph.neighbors(n)) - neighbors
                blocks.append(new_neighbors)
                neighbors = neighbors.union(new_neighbors)
        G = graph.subgraph(neighbors)

        if len(blocks) > 1:
            # Create a block representation of our graph and make sure we're plotting
            #  anything that connects the blocks too

            # Create blocks for each individual node not already in a block
            non_blocked = set(self.dataG.nodes()) - neighbors
            non_blocked = [[
                a,
            ] for a in non_blocked]

            partitions = blocks + non_blocked

            # B = nx.blockmodel(graph, partitions)
            B = nx.quotient_graph(graph, partitions, relabel=True)

            # The resulting graph will has nodes numbered according their index in partitions
            # We want to go through the partitions which are blocks and find the shortest path

            num_blocks = len(blocks)
            for frm_node, to_node in zip(range(num_blocks),
                                         range(1, num_blocks - 1)):
                try:
                    path = nx.shortest_path(B, frm_node, to_node)
                except nx.NetworkXNoPath as e:
                    pass  # In an island, which is permissible
                except nx.NodeNotFound as e2:
                    pass  # Node reduced away, which is permissible
                except nx.NetworkXError as e:
                    tkm.showerror("Node not in graph", str(e))
                    return
                else:
                    # Break path in B back down into path in G
                    path2 = []
                    for a in path[1:-1]:  # don't include end points
                        for n in partitions[a]:
                            neighbors.add(n)
            G = graph.subgraph(neighbors)

        return G
Example #37
df = pd.read_csv('D:/Practise/advent_of_code/day_6_input.txt', header=None)
df.columns = ['original']
df['to'] = df['original'].str[:3]
df['from'] = df['original'].str[4:]

# Solution to part 1
# Creating a directed graph
G = nx.DiGraph()

#Adding nodes and edges to the graph
for i, j in df.iterrows():
    G.add_edge(j['from'], j['to'])

# Finding the shortest path from each node to COM
sp = nx.shortest_path(G, target='COM')

# Computing total number of orbits
orbits = 0
for key in sp:
    orbits = orbits + (len(sp[key]) - 1)

print(orbits)

# Part 2
#Converting the graph to an undirected graph
UG = G.to_undirected()

# Finding the orbital transfers needed between YOU and SAN: the path
# includes YOU and SAN themselves, so the transfer count is the number
# of path edges minus two, i.e. len(path) - 3
print(len(nx.shortest_path(UG, source='YOU', target='SAN')) - 3)
Example #38
def global_reaching_centrality(G, weight=None, normalized=True):
    """Returns the global reaching centrality of a directed graph.

    The *global reaching centrality* of a weighted directed graph is the
    average over all nodes of the difference between the local reaching
    centrality of the node and the greatest local reaching centrality of
    any node in the graph [1]_. For more information on the local
    reaching centrality, see :func:`local_reaching_centrality`.
    Informally, the local reaching centrality is the proportion of the
    graph that is reachable from the neighbors of the node.

    Parameters
    ----------
    G : DiGraph
        A networkx DiGraph.

    weight : None or string, optional (default=None)
        Attribute to use for edge weights. If ``None``, each edge weight
        is assumed to be one. A higher weight implies a stronger
        connection between nodes and a *shorter* path length.

    normalized : bool, optional (default=True)
        Whether to normalize the edge weights by the total sum of edge
        weights.

    Returns
    -------
    h : float
        The global reaching centrality of the graph.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge(1, 2)
    >>> G.add_edge(1, 3)
    >>> nx.global_reaching_centrality(G)
    1.0
    >>> G.add_edge(3, 2)
    >>> nx.global_reaching_centrality(G)
    0.75

    See also
    --------
    local_reaching_centrality

    References
    ----------
    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
           "Hierarchy Measure for Complex Networks."
           *PLoS ONE* 7.3 (2012): e33799.
           https://doi.org/10.1371/journal.pone.0033799
    """
    if nx.is_negatively_weighted(G, weight=weight):
        raise nx.NetworkXError('edge weights must be positive')
    total_weight = G.size(weight=weight)
    if total_weight <= 0:
        raise nx.NetworkXError('Size of G must be positive')

    # If provided, weights must be interpreted as connection strength
    # (so higher weights are more likely to be chosen). However, the
    # shortest path algorithms in NetworkX assume the provided "weight"
    # is actually a distance (so edges with higher weight are less
    # likely to be chosen). Therefore we need to invert the weights when
    # computing shortest paths.
    #
    # If weight is None, we leave it as-is so that the shortest path
    # algorithm can use a faster, unweighted algorithm.
    if weight is not None:

        def as_distance(u, v, d):
            return total_weight / d.get(weight, 1)

        shortest_paths = nx.shortest_path(G, weight=as_distance)
    else:
        shortest_paths = nx.shortest_path(G)

    centrality = local_reaching_centrality
    # TODO This can be trivially parallelized.
    lrc = [
        centrality(G, node, paths=paths, weight=weight, normalized=normalized)
        for node, paths in shortest_paths.items()
    ]

    max_lrc = max(lrc)
    return sum(max_lrc - c for c in lrc) / (len(G) - 1)
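
A worked check of the formula above, assuming the two-edge star from the docstring: the local reaching centralities are 1.0, 0.0 and 0.0, so GRC = ((1-1) + (1-0) + (1-0)) / (3-1) = 1.0.

import networkx as nx

G = nx.DiGraph([(1, 2), (1, 3)])
lrc = [nx.local_reaching_centrality(G, v) for v in G]
grc = sum(max(lrc) - c for c in lrc) / (len(G) - 1)
assert grc == nx.global_reaching_centrality(G) == 1.0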
Example #39
0
def shortest_path(src, dst):
    G = networkx.Graph()
    G.add_edges_from(topo.links)
    path = networkx.shortest_path(G, src, dst)
    return helper.to_magellan_path(path)
Example #40
0
def paga_compare_paths(adata1, adata2,
                       adjacency_key='connectivities', adjacency_key2=None):
    """Compare paths in abstracted graphs in two datasets.

    Compute the fraction of consistent paths between leafs, a measure for the
    topological similarity between graphs.

    By increasing the verbosity to level 4 and 5, the paths that do not agree
    and the paths that agree are written to the output, respectively.

    The PAGA "groups key" needs to be the same in both objects.

    Parameters
    ----------
    adata1, adata2 : AnnData
        Annotated data matrices to compare.
    adjacency_key : str
        Key for indexing the adjacency matrices in `.uns['paga']` to be used in
        adata1 and adata2.
    adjacency_key2 : str, None
        If provided, used for adata2.


    Returns
    -------
    Named tuple with attributes ``n_steps`` (total number of steps in paths)
    and ``frac_steps`` (fraction of consistent steps), ``n_paths`` and
    ``frac_paths``.
    """
    import networkx as nx
    g1 = nx.Graph(adata1.uns['paga'][adjacency_key])
    g2 = nx.Graph(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else adjacency_key])
    leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]
    logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)
    paga_groups = adata1.uns['paga']['groups']
    asso_groups1 = utils.identify_groups(adata1.obs[paga_groups].values,
                                         adata2.obs[paga_groups].values)
    asso_groups2 = utils.identify_groups(adata2.obs[paga_groups].values,
                                         adata1.obs[paga_groups].values)
    orig_names1 = adata1.obs[paga_groups].cat.categories
    orig_names2 = adata2.obs[paga_groups].cat.categories

    import itertools
    n_steps = 0
    n_agreeing_steps = 0
    n_paths = 0
    n_agreeing_paths = 0
    # loop over all pairs of leaf nodes in the reference adata1
    for (r, s) in itertools.combinations(leaf_nodes1, r=2):
        r2, s2 = asso_groups1[r][0], asso_groups1[s][0]
        orig_names = [orig_names1[int(i)] for i in [r, s]]
        orig_names += [orig_names2[int(i)] for i in [r2, s2]]
        logg.msg('compare shortest paths between leafs ({}, {}) in graph1 and ({}, {}) in graph2:'
               .format(*orig_names), v=4, no_indent=True)
        no_path1 = False
        try:
            path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]
        except nx.NetworkXNoPath:
            no_path1 = True
        no_path2 = False
        try:
            path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]
        except nx.NetworkXNoPath:
            no_path2 = True
        if no_path1 and no_path2:
            # consistent behavior
            n_paths += 1
            n_agreeing_paths += 1
            n_steps += 1
            n_agreeing_steps += 1
            logg.msg('there are no connecting paths in both graphs', v=5, no_indent=True)
            continue
        elif no_path1 or no_path2:
            # non-consistent result
            n_paths += 1
            n_steps += 1
            continue
        if len(path1) >= len(path2):
            path_mapped = [asso_groups1[l] for l in path1]
            path_compare = path2
            path_compare_id = 2
            path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]
        else:
            path_mapped = [asso_groups2[l] for l in path2]
            path_compare = path1
            path_compare_id = 1
            path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]
        n_agreeing_steps_path = 0
        ip_progress = 0
        for il, l in enumerate(path_compare[:-1]):
            for ip, p in enumerate(path_mapped):
                if ip >= ip_progress and l in p:
                    # check whether we can find the step forward of path_compare in path_mapped
                    if (ip + 1 < len(path_mapped)
                        and
                        path_compare[il + 1] in path_mapped[ip + 1]):
                        # make sure that a step backward leads us to the same value of l
                        # in case we "jumped"
                        logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'
                               .format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)
                        consistent_history = True
                        for iip in range(ip, ip_progress, -1):
                            if l not in path_mapped[iip - 1]:
                                consistent_history = False
                        if consistent_history:
                            # here, we take one step further back (ip_progress - 1); it's implied that this
                            # was ok in the previous step
                            logg.msg('    step(s) backward to position(s) {} in path_mapped are fine, too: valid step'
                                   .format(list(range(ip - 1, ip_progress - 2, -1))), v=6)
                            n_agreeing_steps_path += 1
                            ip_progress = ip + 1
                            break
        n_steps_path = len(path_compare) - 1
        n_agreeing_steps += n_agreeing_steps_path
        n_steps += n_steps_path
        n_paths += 1
        if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1

        # only for the output, use original names
        path1_orig_names = [orig_names1[int(s)] for s in path1]
        path2_orig_names = [orig_names2[int(s)] for s in path2]
        logg.msg('      path1 = {},\n'
               'path_mapped = {},\n'
               '      path2 = {},\n'
               '-> n_agreeing_steps = {} / n_steps = {}.'
               .format(path1_orig_names,
                       [list(p) for p in path_mapped_orig_names],
                       path2_orig_names,
                       n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)
    Result = namedtuple('paga_compare_paths_result',
                        ['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])
    return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,
                  n_steps=n_steps if n_steps > 0 else np.nan,
                  frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,
                  n_paths=n_paths if n_steps > 0 else np.nan)
Example #41
0
def setup_asr_step_methods(m, vars, additional_stochs=[]):
    # groups RE stochastics that are suspected of being dependent
    groups = []
    fe_group = [
        n for n in vars.get('beta', []) if isinstance(n, mc.Stochastic)
    ]
    ap_group = [
        n for n in vars.get('gamma', []) if isinstance(n, mc.Stochastic)
    ]
    groups += [[g_i, g_j] for g_i, g_j in zip(ap_group[1:], ap_group[:-1])
               ] + [fe_group, ap_group, fe_group + ap_group]

    for a in vars.get('hierarchy', []):
        group = []

        col_map = dict([[key, i] for i, key in enumerate(vars['U'].columns)])

        if a in vars['U']:
            for b in nx.shortest_path(vars['hierarchy'], 'all', a):
                if b in vars['U']:
                    n = vars['alpha'][col_map[b]]
                    if isinstance(n, mc.Stochastic):
                        group.append(n)
        groups.append(group)
        #if len(group) > 0:
        #group += ap_group
        #groups.append(group)
        #group += fe_group
        #groups.append(group)

    for stoch in groups:
        if len(stoch) > 0 and np.all(
            [isinstance(n, mc.Stochastic) for n in stoch]):
            # only step certain stochastics, for understanding convergence
            #if 'gamma_i' not in stoch[0].__name__:
            #    print 'no stepper for', stoch
            #    m.use_step_method(mc.NoStepper, stoch)
            #    continue

            #print 'finding Normal Approx for', [n.__name__ for n in stoch]
            if additional_stochs == []:
                vars_to_fit = [
                    vars.get('p_obs'),
                    vars.get('pi_sim'),
                    vars.get('smooth_gamma'),
                    vars.get('parent_similarity'),
                    vars.get('mu_sim'),
                    vars.get('mu_age_derivative_potential'),
                    vars.get('covariate_constraint')
                ]
            else:
                vars_to_fit = additional_stochs

            try:
                # raise ValueError  # uncomment to force the AdaptiveMetropolis fallback below
                na = mc.NormApprox(vars_to_fit + stoch)
                na.fit(method='fmin_powell', verbose=0)
                cov = np.array(np.linalg.inv(-na.hess), order='F')
                #print 'opt:', np.round_([n.value for n in stoch], 2)
                #print 'cov:\n', cov.round(4)
                if np.all(np.linalg.eigvals(cov) >= 0):
                    m.use_step_method(mc.AdaptiveMetropolis, stoch, cov=cov)
                else:
                    raise ValueError
            except ValueError:
                #print 'cov matrix is not positive semi-definite'
                m.use_step_method(mc.AdaptiveMetropolis, stoch)
Example #42
0
# In[11]:

G.number_of_edges()

# In[8]:

nx.number_of_isolates(G)

# In[9]:

nx.density(G)

# In[11]:

list(nx.shortest_path(G, 'Alps', 'Aara'))

# In[20]:

nx.average_clustering(G)

# In[33]:

i = 100

print("Page:", a_links[i])
for j in range(len(matrix)):

    if matrix[i][j] == 1:
        print(j, a_links[j])
Example #43
0
# Find the 30 airports with most edges (both in and out)
graph = create_graph()

a = dict(graph.degree()).items()
b = sorted(a, key=lambda x: x[1])
print([n for n, e in b[-30:]])

# Draw a graph over these
ca = graph.subgraph([x[0] for x in b[-30:]])
plt.show(block=False)
draw_graph(ca)
plt.savefig('Graph.svg', format='SVG')

print(max(dict(graph.degree()).items(), key=lambda x: x[1]))

n = nx.shortest_path(ca)  # all-pairs shortest paths within the 30-airport subgraph
print('All-pairs shortest paths: ', n)

# Create a flask server and plot the graph on this. Additional: Make a calculator on the app
# That calculates the shortest path between two points
app = Flask(__name__)

tmpl = """{% block content %}
<section>
    {{ svg }}
</section>
{% endblock %}"""


@app.route('/')
def show_svg():
Example #44
0
#-------------------------------------------------------------
# Average distance from a node to all the nodes of its community.
# Distance can be measured as the number of links on the shortest path to another node.
# E.g.:
# distance between 'Jet' and 'Trigger' = len(nx.shortest_path(mydolphins,'Jet','Trigger')) - 1

# Computing a[i]
a = [
]  # will hold, for each node in delfines, the average distance to nodes of its own community
for idelfin in delfines:
    distancias = []
    for jdelfin in delfines:
        if mydolphins.nodes[idelfin]['comunity'] == mydolphins.nodes[jdelfin][
                'comunity']:
            distancias.append(
                len(nx.shortest_path(mydolphins, idelfin, jdelfin)) - 1)
    promedio = np.mean(distancias)
    a.append(promedio)

# Computing b[i]
b = [
]  # will hold, for each node in delfines, the average distance to nodes of the other communities; the minimum average is kept
for idelfin in delfines:
    # Keep only the communities other than that of dolphin idelfin
    comuni = ['blue', 'red', 'orange', 'green']
    comuni.remove(mydolphins.nodes[idelfin]['comunity'])
    b_comuni = []
    for c in comuni:
        distancias = []
        for jdelfin in delfines:
            if mydolphins.nodes[jdelfin]['comunity'] == c:
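
The len(nx.shortest_path(...)) - 1 idiom above is just the hop count; nx.shortest_path_length returns it directly. A minimal check on a toy path graph:

import networkx as nx

g = nx.path_graph(4)
assert nx.shortest_path_length(g, 0, 3) == len(nx.shortest_path(g, 0, 3)) - 1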
Example #45
0
sammap.add_edge(11, 900, func='straight_turn', max_sd=10)

sammap.add_edge(12, 900, func='right_turn', max_sd=10)
sammap.add_edge(12, 500, func='left_turn', max_sd=10)

# adding the edge from the self made nodes to the real nodes.
sammap.add_edge(100, 1, func='lane_follow', max_sd=10)
sammap.add_edge(200, 2, func='lane_follow', max_sd=10)
sammap.add_edge(300, 3, func='lane_follow', max_sd=10)
sammap.add_edge(400, 4, func='lane_follow', max_sd=10)
sammap.add_edge(500, 5, func='lane_follow', max_sd=10)
sammap.add_edge(600, 6, func='lane_follow', max_sd=10)
sammap.add_edge(700, 7, func='lane_follow', max_sd=10)
sammap.add_edge(800, 8, func='lane_follow', max_sd=10)
sammap.add_edge(900, 9, func='lane_follow', max_sd=10)
sammap.add_edge(1000, 10, func='lane_follow', max_sd=10)
sammap.add_edge(1100, 11, func='lane_follow', max_sd=10)
sammap.add_edge(1200, 12, func='lane_follow', max_sd=10)

if __name__ == '__main__':
    start = sys.argv[1]
    rest = sys.argv[2:]
    start = int(start)
    rest = [int(node) for node in rest]
    path = [start]
    for goal in rest:
        next_path = nx.shortest_path(sammap, path[-1], goal)
        path.extend(next_path[1:])
    print(path)
    print([node for node in path if node < 99])
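
A minimal sketch, on a toy path graph, of the [1:] stitching used above: consecutive legs share their junction node, so each new leg is appended without its head.

import networkx as nx

g = nx.path_graph(5)
path = [0]
for goal in (2, 4):
    path.extend(nx.shortest_path(g, path[-1], goal)[1:])
print(path)  # [0, 1, 2, 3, 4]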
Example #46
0
def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True):
    """Returns the local reaching centrality of a node in a directed
    graph.

    The *local reaching centrality* of a node in a directed graph is the
    proportion of other nodes reachable from that node [1]_.

    Parameters
    ----------
    G : DiGraph
        A NetworkX DiGraph.

    v : node
        A node in the directed graph `G`.

    paths : dictionary (default=None)
        If this is not `None` it must be a dictionary representation
        of single-source shortest paths, as computed by, for example,
        :func:`networkx.shortest_path` with source node `v`. Use this
        keyword argument if you intend to invoke this function many
        times but don't want the paths to be recomputed each time.

    weight : None or string, optional (default=None)
        Attribute to use for edge weights.  If `None`, each edge weight
        is assumed to be one. A higher weight implies a stronger
        connection between nodes and a *shorter* path length.

    normalized : bool, optional (default=True)
        Whether to normalize the edge weights by the total sum of edge
        weights.

    Returns
    -------
    h : float
        The local reaching centrality of the node ``v`` in the graph
        ``G``.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edges_from([(1, 2), (1, 3)])
    >>> nx.local_reaching_centrality(G, 3)
    0.0
    >>> G.add_edge(3, 2)
    >>> nx.local_reaching_centrality(G, 3)
    0.5

    See also
    --------
    global_reaching_centrality

    References
    ----------
    .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek.
           "Hierarchy Measure for Complex Networks."
           *PLoS ONE* 7.3 (2012): e33799.
           https://doi.org/10.1371/journal.pone.0033799
    """
    if paths is None:
        if nx.is_negatively_weighted(G, weight=weight):
            raise nx.NetworkXError('edge weights must be positive')
        total_weight = G.size(weight=weight)
        if total_weight <= 0:
            raise nx.NetworkXError('Size of G must be positive')
        if weight is not None:
            # Interpret weights as lengths.
            def as_distance(u, v, d):
                return total_weight / d.get(weight, 1)

            paths = nx.shortest_path(G, source=v, weight=as_distance)
        else:
            paths = nx.shortest_path(G, source=v)
    # If the graph is unweighted, simply return the proportion of nodes
    # reachable from the source node ``v``.
    if weight is None and G.is_directed():
        return (len(paths) - 1) / (len(G) - 1)
    if normalized and weight is not None:
        norm = G.size(weight=weight) / G.size()
    else:
        norm = 1
    # TODO This can be trivially parallelized.
    avgw = (_average_weight(G, path, weight=weight) for path in paths.values())
    sum_avg_weight = sum(avgw) / norm
    return sum_avg_weight / (len(G) - 1)
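
A small sketch of the precomputed-paths reuse the docstring describes, assuming the unweighted case: compute the single-source shortest paths once and pass them in, so repeated calls skip the recomputation.

import networkx as nx

G = nx.DiGraph([(1, 2), (1, 3), (3, 2)])
paths = nx.shortest_path(G, source=3)  # {3: [3], 2: [3, 2]}
assert nx.local_reaching_centrality(G, 3, paths=paths) == 0.5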
Example #47
0
        try:
            c.inputs.append(direction)
            c.run()
        except OutputInterrupt:
            reply = c.outputs[-1]
            test_position = add_tuples(position, direction_movement[direction])
            ship_map[test_position] = reply

            # visualise the search by uncommenting this
            # draw(ship_map, [])

            if reply == 1:
                # found an open space so add all directions from the new space to the frontier
                for d in direction_movement:
                    p = add_tuples(test_position, direction_movement[d])
                    if p not in ship_map:
                        frontier.append((test_position, d, deepcopy(c)))

            if reply == 2:
                target = test_position

    graph = build_graph(ship_map, target)
    shortest = networkx.shortest_path(graph, (0, 0), target)
    flood = networkx.eccentricity(graph, v=target)

    # draw(ship_map, shortest)

    print(f"Shortest path to oxygen is {len(shortest) - 1} commands")
    print(f"Oxygen is flooded in {flood} minutes")
    print(f"My flood took {flood_oxygen(ship_map, target)} minutes")
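
The networkx.eccentricity(graph, v=target) call above works because the eccentricity of the target is the longest shortest-path distance from it, i.e. the number of minutes until the last open cell is flooded. A minimal check on a path graph:

import networkx

g = networkx.path_graph(4)  # 0-1-2-3
assert networkx.eccentricity(g, v=0) == 3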
Example #48
0
    """Return the words example graph from the Stanford GraphBase"""
    fh = gzip.open('words4_dat.txt.gz', 'r')
    words = set()
    for line in fh.readlines():
        line = line.decode()
        if line.startswith('*'):
            continue
        w = str(line[0:4])
        words.add(w)
    return generate_graph(words)


if __name__ == '__main__':
    G = words_graph()
    print("Loaded words_dat.txt containing 2174 four-letter English words.")
    print("Two words are connected if they differ in one letter.")
    print("Graph has %d nodes with %d edges" %
          (nx.number_of_nodes(G), nx.number_of_edges(G)))
    print("%d connected components" % nx.number_connected_components(G))

    for (source, target) in [
        ('acid', 'back'),
        ('awry', 'zion'),
    ]:
        print("Shortest path between %s and %s is" % (source, target))
        try:
            sp = nx.shortest_path(G, source, target)
            for n in sp:
                print(n)
        except nx.NetworkXNoPath:
            print("None")
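
A hedged sketch of the edge rule the printout describes (the generate_graph helper is not shown in this snippet): two four-letter words are adjacent iff they differ in exactly one position.

def differ_by_one(w1, w2):
    return sum(a != b for a, b in zip(w1, w2)) == 1

assert differ_by_one('acid', 'amid')
assert not differ_by_one('acid', 'back')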
Example #49
0
 def choose_path(self, start, end, graph):
     path = nx.shortest_path(graph, start, end, weight="height")
     return path
Example #50
0
def determine_inlining_order(clusters, graph):
    inlinings = []
    for cluster in clusters:
        if len(cluster) == 1:
            inlinings.append(graph.subgraph(cluster))
            continue

        # The steiner tree is a minimum tree spanning a specified set of nodes in a graph. It will be used to define
        # the inlining order. The steiner_tree algorithm is only implemented for connected, undirected graphs.
        # The graph is transformed accordingly. It is not possible to make a subgraph containing only the nodes of a
        # cluster, because they may be connected by an intermediate, unimportant node.
        # Only one intermediate node is allowed when inlining two core functions. All non-core nodes that don't call
        # a core node are removed to prevent dead ends in the inlining graph when the directions are restored.
        reduced_graph = graph.copy()
        for node in list(graph.nodes()):
            if node not in cluster and not any(
                [callee in cluster for callee in graph.successors(node)]):
                reduced_graph.remove_node(node)

        inlining = steiner_tree(reduced_graph.to_undirected(), cluster)
        inlinings.append(inlining)

    # After the steiner tree has been created, the direction information can now be restored from the original graph
    directed_inlinings = []
    for inlining in inlinings:
        directed_inlining = nx.DiGraph()
        if len(inlining) > 1:
            for from_node in inlining:
                for to_node in graph.successors(from_node):
                    if to_node in inlining and inlining.has_edge(
                            from_node, to_node):
                        directed_inlining.add_edge(from_node, to_node)
        else:
            directed_inlining.add_node(list(inlining.nodes())[0])

        # To allow inlining, each cluster must have exactly one root. Additional roots will be moved to a new cluster.
        roots = get_root_nodes(directed_inlining)
        if len(roots) > 1:
                #Other roots may have nodes connected to them that are not reachable from this root. Those nodes shall
                #also be moved to the new cluster.
            for other_root in roots[1:]:
                this_roots_subgraph = set([
                    node for node in directed_inlining
                    if nx.has_path(directed_inlining, roots[0], node)
                ])
                other_roots_subgraph = set([
                    node for node in directed_inlining
                    if nx.has_path(directed_inlining, other_root, node)
                ])
                other_roots_subgraph = other_roots_subgraph - this_roots_subgraph

                directed_inlining.remove_nodes_from(other_roots_subgraph)
                other_roots_inlining = determine_inlining_order(
                    [other_roots_subgraph], graph)[0]
                directed_inlinings.append(other_roots_inlining)

        root = roots[0] if len(roots) != 0 else None
        # While the undirected graph was a tree, restoring the direction information may cause a cycle when two
        # functions call each other, which leads to an infinite loop when inlining. The edge leading towards the
        # root is deleted, so all nodes remain reachable from the root
        try:

            #Search for a root node candidate if none exist due to the restoration of the direction information.
            #Due to cycles root nodes may be called by other nodes, but only if the root node itself calls that node
            #through a cycle. The first suitable root node candidate is selected
            for cycle in nx.simple_cycles(directed_inlining):
                if root is not None:
                    break
                assert len(cycle) == 2
                for node in cycle:
                    callers = set(directed_inlining.predecessors(node))
                    callees = set(directed_inlining.successors(node))
                    if len(callers.difference(callees)) == 0:
                        root = node
                        break

            for cycle in nx.simple_cycles(directed_inlining):
                # cycles cannot be longer than 2, otherwise the undirected graph would not be a tree. That is unless
                # two cycles are adjacent to each other, which is not recognized by nx.simple_cycles
                assert len(cycle) == 2
                nodeA = cycle[0]
                nodeB = cycle[1]

                path = nx.shortest_path(directed_inlining, root, nodeB)
                if nodeA in path:
                    directed_inlining.remove_edge(nodeB, nodeA)
                else:
                    directed_inlining.remove_edge(nodeA, nodeB)

        except nx.NetworkXNoCycle:
            pass

        assert len(get_root_nodes(directed_inlining)) == 1
        directed_inlinings.append(directed_inlining)

    return directed_inlinings
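
A minimal sketch of the steiner_tree call used above, assuming networkx.algorithms.approximation is available: the returned tree spans the given terminal nodes, passing through intermediate nodes where necessary.

import networkx as nx
from networkx.algorithms.approximation import steiner_tree

G = nx.path_graph(5)         # 0-1-2-3-4
T = steiner_tree(G, [0, 4])  # must route through the non-terminals 1, 2, 3
print(sorted(T.nodes()))     # [0, 1, 2, 3, 4]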
Example #51
0
def func(arg):
    idx, index_elements = arg

    DELAY = 0
    DELAY_CELL = []
    print("CELL:", index_elements)
    gdf_elements = elements[index_elements]
    gdf_elements = pd.merge(gdf_elements, all_catania_OD[['u', 'v', 'ORIGIN', 'DESTINATION']], on=['u', 'v'], how='left')
    ## drop all rows with NA values
    gdf_elements = gdf_elements.dropna()
    # gdf_elements = gdf_elements.drop_duplicates(['ORIGIN', 'DESTINATION'])
    gdf_elements.reset_index(level=0, inplace=True)
    # O = list(gdf_elements.ORIGIN)
    O = list(gdf_elements.ORIGIN.unique())
    # D = list(gdf_elements.DESTINATION)
    D = list(gdf_elements.DESTINATION.unique())
    zipped_OD = zip(O, D)
    # loop over each ORIGIN --> DESTINATION pair
    for (i, j) in zipped_OD:
        print(i,j)
        try:
            ## find shortest path based on the "cost" (time)
            ## NULL scenario
            try:
                init_shortest_OD_path_cost = nx.shortest_path(grafo, i, j, weight='VIASAT_cost')  # using cost (time)
                print("DO IT++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                path_edges = list(zip(init_shortest_OD_path_cost,init_shortest_OD_path_cost[1:]))
                lr = nx.shortest_path_length(grafo, i,j, weight='VIASAT_cost') ## this is a time (seconds)
                lunghezza=[]
                if lr != 0:
                    for l in path_edges:
                      lunghezza.append(grafo[l[0]][l[1]][0]['length'])  # get only the length of each arc between 2 consecutive path nodes; [0] is the key = 0
                    print("km:{0:.3f} h:{1:.3f} vm:{2:.0f}".format(sum(lunghezza)/1000, lr/3600, sum(lunghezza)/1000/lr*3600))  # units == km
                    time_OD_NULL = lr/3600  # hours
                    length_OD_NULL = sum(lunghezza)/1000  #km

                    ## update MAP with paths
                    df_nodes = pd.DataFrame(path_edges)
                    df_nodes.columns = ['u', 'v']
                    ## merge 'df_nodes' with 'gdf_edges'
                    edges_shortest_route_VIASAT_cost = pd.merge(df_nodes, gdf_edges, on=['u', 'v'], how='left')
                    edges_shortest_route_VIASAT_cost = gpd.GeoDataFrame(edges_shortest_route_VIASAT_cost)
                    edges_shortest_route_VIASAT_cost.drop_duplicates(['u', 'v'], inplace=True)

                    ## merge 'df_nodes' with 'TIME_EDGES' (sottorete)
                    edges_matched_route_OD = pd.merge(df_nodes, TIME_EDGES, on=['u', 'v'], how='left')
                    edges_matched_route_OD = gpd.GeoDataFrame(edges_matched_route_OD)
                    edges_matched_route_OD.drop_duplicates(['u', 'v'], inplace=True)

                    ## get the OD travel demand (counts/travelled time) (NULL SCENARIO, no penalty)
                    ## https://aetransport.org/public/downloads/QIoR6/498-514ec4e5be18d.pdf
                    if len(edges_matched_route_OD) > 0:
                        ## only use vehicle number (counts, MEAN from each EDGE)  and then divide by the time_OD_NULL....this is the TRAVEL_DEMAND_OD
                        counts = (pd.DataFrame(edges_matched_route_OD))['counts']
                        counts = counts.dropna()
                        ## Travel demand: divide by 4 (months), then by 30 (days), then by 16 (hours of higher traffic flow) (hours)
                        TRAVEL_DEMAND_OD = ((((counts.sum())/1)/30)/16)  # vehicles/hour over the whole path (trajectory) # if only for one month
                        # TRAVEL_DEMAND_OD = ((((counts.sum()) / 4) / 30) / 16)  # vehicles/hour over the whole path (trajectory) # if for 4 months



                ## for each edge (LINK) in the "element" assign a penalty time (disruption)
                # close a LINK (u,v pair) by adding sufficiently large penalty M (time in seconds ~ 5 hours = 18000 secs)
                closed_links = set(zip(gdf_elements.u, gdf_elements.v))
                for u, v, key, attr in grafo.edges(keys=True, data=True):
                    attr['VIASAT_cost_penalty'] = attr.get("VIASAT_cost")
                    if (u, v) in closed_links:
                        print(u,v)
                        print("gotta!=============================================================================================")
                        # break
                        attr['VIASAT_cost_penalty'] = abs(float(attr['VIASAT_cost']) + penalty)
                        print(attr['VIASAT_cost_penalty'])
                        grafo.add_edge(u, v, key, attr_dict=attr)
                # get shortest path again...but now with the PENALTY
                shortest_OD_path_VIASAT_penalty = nx.shortest_path(grafo, i, j,
                                                                        weight='VIASAT_cost_penalty')
                path_edges = list(zip(shortest_OD_path_VIASAT_penalty, shortest_OD_path_VIASAT_penalty[1:]))
                lr = nx.shortest_path_length(grafo, i, j, weight='VIASAT_cost_penalty')
                if lr !=0:
                    lunghezza = []
                    for l in path_edges:
                        lunghezza.append(grafo[l[0]][l[1]][0]['length'])  # get only the length of each arc between 2 consecutive path nodes; [0] is the key = 0
                    print("km:{0:.3f} h:{1:.3f} vm:{2:.0f}".format(sum(lunghezza) / 1000, lr / 3600,
                                                                   sum(lunghezza) / 1000 / lr * 3600))  # units == km
                    # total time from O--D with penalty
                    time_OD_penalty = lr/3600
                    length_OD_penalty = sum(lunghezza) / 1000  # km

                    # update MAP with paths
                    # add shortest path (by "VIASAT cost" to the my_map
                    df_nodes = pd.DataFrame(path_edges)
                    df_nodes.columns = ['u', 'v']

                    ## merge 'df_nodes' with 'gdf_edges'
                    edges_shortest_route_VIASAT_penalty = pd.merge(df_nodes, gdf_edges, on=['u', 'v'], how='left')
                    edges_shortest_route_VIASAT_penalty = gpd.GeoDataFrame(edges_shortest_route_VIASAT_penalty)
                    edges_shortest_route_VIASAT_penalty.drop_duplicates(['u', 'v'], inplace=True)

                ## calculate "closure impact" for each DESTINATION
                ## difference time between closure and normal conditions
                DT = time_OD_penalty - time_OD_NULL  # (hours)
                ## closure impact
                if DT < penalty / 3600:
                    DELAY = DELAY + TRAVEL_DEMAND_OD * DT * ((penalty / 3600) - DT / 2)  # vehicles*hours

                    #####---------------------------------------------###############################
                    DELAY_OD = TRAVEL_DEMAND_OD * DT * ((penalty / 3600) - DT / 2)  # vehicles*hours
                    df_OD = pd.DataFrame(edges_shortest_route_VIASAT_penalty, columns=['u', 'v'])
                    df_OD['importance'] = DELAY_OD
                    ### save into the DB
                    #####---------------------------------------------###############################
                    DELAY_CELL.append(DELAY)
                    print("TOTAL DELAY:", DELAY)
                    print("max(DELAY_CELL):", max(DELAY_CELL), "CELL:", index_elements)
                else:
                    DELAY = DELAY + TRAVEL_DEMAND_OD * (((penalty / 3600) ** 2) / 2)  # vehicles*hours
                    #####---------------------------------------------###############################
                    DELAY_OD = TRAVEL_DEMAND_OD * (((penalty / 3600) ** 2) / 2)  # vehicles*hours
                    df_OD = pd.DataFrame(edges_shortest_route_VIASAT_penalty, columns=['u', 'v'])
                    df_OD['importance'] = DELAY_OD
                    ### save into the DB
                    #####---------------------------------------------###############################
                    DELAY_CELL.append(DELAY)
                    print("TOTAL DELAY:", DELAY)
                    print("max(DELAY_CELL):", max(DELAY_CELL), "CELL:", index_elements)

                # restore initial travel time for each edge (LINK)....basically we need to remove the penalty time
                closed_links = set(zip(gdf_elements.u, gdf_elements.v))
                for u, v, key, attr in grafo.edges(keys=True, data=True):
                    if (u, v) in closed_links:
                        # print(u,v)
                        # print("gotta!=============================================================================================")
                        attr['VIASAT_cost_penalty'] = float(attr['VIASAT_cost'])
                        grafo.add_edge(u, v, key, attr_dict=attr)

                with open("last_CELL_ID.txt", "w") as text_file:
                        text_file.write("last CELL ID: %s" % (index_elements))
            except ValueError:
                print('Contradictory paths found:', 'negative weights?')
        except (nx.NodeNotFound, nx.exception.NetworkXNoPath):
            print('O-->D NodeNotFound', 'i:', i, 'j:', j)

    ## for each (u,v) pair, create a new dataframe with field "importance"
    U = list(gdf_elements.u)
    V = list(gdf_elements.v)
    df_vulnerability = list(zip(U, V))
    df_vulnerability = pd.DataFrame(df_vulnerability, columns=['u', 'v'])
    df_vulnerability['importance'] = DELAY
    df_vulnerability['CELL'] = index_elements
    df_vulnerability['importance'] = df_vulnerability.importance.astype('int')
    df_vulnerability['importance'] = df_vulnerability['importance'].replace(np.nan, 0)
    df_vulnerability = df_vulnerability[['u', 'v', 'CELL', 'importance']]
    df_vulnerability.drop_duplicates(['u', 'v', 'CELL', 'importance'], inplace=True)

    ### Connect to a DB and populate the DB  ###
    connection = engine.connect()
    df_vulnerability.to_sql("vulnerability_" + DAY.strip() + "_" + MONTH.strip() + "_2019", con=connection, schema="public",
                            if_exists='append')
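
A worked instance of the delay formula above, with hypothetical numbers: an 18000 s (5 h) closure penalty, a demand of 100 vehicles/hour and a detour of DT = 0.5 h.

penalty = 18000          # closure penalty in seconds (hypothetical)
TRAVEL_DEMAND_OD = 100   # vehicles/hour (hypothetical)
DT = 0.5                 # extra O-D travel time under closure, in hours
delay = TRAVEL_DEMAND_OD * DT * ((penalty / 3600) - DT / 2)
print(delay)             # 237.5 vehicle-hours, since DT < penalty / 3600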
Example #52
0
def test_linear_program(network_factory, A):
    graph = network_factory(A)
    if A.shape == example_network_3x3.shape and np.allclose(
            A, example_network_3x3):
        fname = "example_3x3"
    elif A.shape == example_network_5x5.shape and np.allclose(
            A, example_network_5x5):
        fname = "example_5x5"
    else:
        fname = hash_graph(A)
    nx.write_graphml(graph, f"test/artifacts/{fname}.graphml")
    n_nodes = graph.number_of_nodes()
    hypothesis.note(f"number of nodes = {n_nodes}")
    old_weights = np.array([graph.edges[e][WEIGHT_KEY] for e in graph.edges])
    _paths = nx.shortest_path(graph, weight=WEIGHT_KEY)
    paths = [p for _, ps in _paths.items() for _, p in ps.items()]
    hypothesis.note(f"paths:\n{paths}")
    result = linear_program(graph, paths)
    hypothesis.note(
        f"number of constraints = {len(result.problem.constraints)}")
    result.penalty.value = 0
    result.problem.solve()
    assert result.problem.status == cp.OPTIMAL
    hypothesis.note(f"objective value = {result.problem.value}")
    weights = result.edge_cost.value
    np.save(f"test/artifacts/{fname}.weights.npy", weights)
    hypothesis.note(
        f"Did we recover the edge weights? {'yes' if np.allclose(weights, old_weights) else 'no'}; max error = {abs(weights - old_weights).max()}"
    )
    assert weights is not None
    assert len(weights) == graph.number_of_edges()
    assert weights.min() >= 0.0
    graph = assign_weights(graph, weights)
    for u in graph.nodes:
        assert sum(graph.edges[u, v][WEIGHT_KEY]
                   for v in graph.successors(u)) == pytest.approx(1.0)
    new_A = np.zeros_like(A)
    for i, j, data in graph.edges(data=True):
        new_A[i, j] = data[WEIGHT_KEY]
    old_A = A / A.sum(1).reshape(-1, 1)
    hypothesis.note("Input adjacency matrix (row-normalized):")
    hypothesis.note(old_A)
    hypothesis.note("Recovered adjacency matrix:")
    hypothesis.note(new_A)
    errors = [
        abs(old - new)
        for old, new in zip(old_A.reshape(-1), new_A.reshape(-1)) if old > 0
    ]
    hypothesis.note(
        f"Errors: mean={np.mean(errors)}; min={np.min(errors)}, max={np.max(errors)}"
    )
    # Run shortest paths on graph with recovered weights
    expected_path_length = nx.shortest_path_length(graph, weight=WEIGHT_KEY)
    n_path_checks = 0
    # Everything being compared is in [0, 1] so absolute tolerance is sufficient
    approx = lambda expected: pytest.approx(expected, abs=1e-6)
    for s, costs in expected_path_length:
        for t, cost in costs.items():
            n_path_checks += 1
            hypothesis.note(f"{s}->{t} path = {_paths[s][t]}")
            # These two should ALWAYS be the same
            actual_cost = result.min_trip_cost_of(
                (s, t))  # min path cost from solution
            expected_cost = path_cost(
                graph,
                _paths[s][t])  # cost of the observed path on the network
            hypothesis.note(f"least cost from {s}->{t} by edge weight: {cost}")
            hypothesis.note(
                f"least cost from {s}->{t} by lp variable: {actual_cost}")
            hypothesis.note(
                f"least cost from {s}->{t} by actual shortest path: {expected_cost}"
            )
            assert expected_cost == approx(actual_cost)
            if not CI:
                # we are interested in counter examples locally, but not on CI
                assert cost == expected_cost
Example #53
0
        'AA_TOF_node_counts.pickle') or not os.path.exists(
            'AA_TOF_counts.pickle') or not os.path.exists(
                'AA_TOF_path_lengths.pickle'):
    counts = {}
    node_counts = {}
    path_lengths = {'length': [], 'type': []}
    d_keys = list(disease_gene_sets.keys())

    disease = 'APLASTIC ANEMIA'
    disease_genes = disease_gene_sets[disease]
    nodes = partition(disease_genes, 2)
    seeds, targets = nodes[0], nodes[1]
    for i, seed in enumerate(seeds):
        for target in targets:
            try:
                path = nx.shortest_path(G, seed, target)
            except (nx.exception.NodeNotFound, nx.exception.NetworkXNoPath):
                continue
            paths.append(path)
            path_lengths['length'].append(len(path))
            path_lengths['type'].append('normal')
            for i in range(len(path) - 1):
                p = G[path[i]][path[i + 1]]['predicate_name']
                # add proportional weight
                if p in counts:
                    counts[p] += 1
                else:
                    counts[p] = 1
            # count the number times nodes occur in the paths
            for node in path:
                if node in node_counts:
Example #54
0
    else:  #read speed from way class dictionary
        speedlist = way_dict.get(attr["highway"])
        speed = speedlist[0] * 1000 / 3600
        attr['cost'] = attr.get("length") / speed
        #print(attr.get("highway"), speedlist[0], attr.get("cost"),'-----------')
        G2.add_edge(u, v, key, attr_dict=attr)
'''        
for u,v,key,attr in G2.edges(keys=True,data=True):
    if attr['highway'] in highways_to_keep:
        H.add_edge(u,v,key,attr_dict=attr)
H.graph = G2.graph
'''
#G2 = nx.Graph(G1)
from_n = np.random.choice(G2.nodes)
to_n = np.random.choice(G2.nodes)
route = nx.shortest_path(G2, from_n, to_n, weight='cost')
lr = nx.shortest_path_length(G2, from_n, to_n, weight='cost')
print(lr)
#route = nx.shortest_path(G, from_n, to_n, weight='length')  # leftover line: would overwrite the cost-based route computed above
path_edges = list(zip(route, route[1:]))
lunghezza = []
#route_length_km = sum([G2.edge[u][v][0]['length'] for u, v in zip(route, route[1:])]) / 1000.
for l in path_edges:
    lunghezza.append(G2[l[0]][l[1]][0]['length'])
print("km:{0:.3f} h:{1:.3f} vm:{2:.0f}".format(
    sum(lunghezza) / 1000, lr / 3600,
    sum(lunghezza) / 1000 / lr * 3600))
route1 = nx.dijkstra_path(G2, from_n, to_n, weight='cost')
lr1 = nx.dijkstra_path_length(G2, from_n, to_n, weight='cost')
#route2 = nx.astar_path(G2,from_n,to_n,weight='length')
#lr2=nx.astar_path_length(G2, from_n,to_n,weight='length')
Example #55
0
    def __init__(self,
                 run,
                 graph_dir='../data/annotated/whole_v4',
                 n_components=8,
                 min_count=50,
                 max_var=0.1,
                 min_edge=50,
                 clust_algo='k_means',
                 aggregate=-1,
                 optimize=True,
                 max_graphs=None,
                 nc_only=True):
        # General
        self.run = run
        self.graph_dir = graph_dir
        # Nodes parameters
        self.n_components = n_components
        self.min_count = min_count
        self.max_var = max_var

        # Edges parameters
        self.min_edge = min_edge

        # BUILD MNODES
        model_output = inference_on_list(self.run,
                                         self.graph_dir,
                                         os.listdir(self.graph_dir),
                                         max_graphs=max_graphs,
                                         nc_only=nc_only)

        self.node_map = model_output['node_to_zind']
        self.reversed_node_map = {
            value: key
            for key, value in self.node_map.items()
        }

        Z = model_output['Z']
        # self.Z = Z

        # Extract the non canonical edges ids and compute distances between them
        nc_nodes = set()
        nc_edges = set()
        for graph_name in os.listdir(self.graph_dir)[:max_graphs]:
            graph_path = os.path.join(self.graph_dir, graph_name)
            g = pickle.load(open(graph_path, 'rb'))['graph'].to_undirected()
            local_nodes = set()
            for source, target, label in g.edges(data='label'):
                if label not in ['CWW', 'B53']:
                    local_nodes.add((source, self.node_map[source]))
                    local_nodes.add((target, self.node_map[target]))
            for source, sid in local_nodes:
                nc_nodes.add(sid)
            for (source, sid), (target,
                                tid) in itertools.combinations(local_nodes, 2):
                try:
                    distance = len(nx.shortest_path(g, source, target))
                    # TODO : Find better cutoff
                    if distance < 7:
                        nc_edges.add((sid, tid, distance))
                except nx.NetworkXNoPath:
                    pass

        list_ids = sorted(list(nc_nodes))
        extracted_embeddings = Z[list_ids]

        clust_info = cluster(extracted_embeddings,
                             algo=clust_algo,
                             optimize=optimize,
                             n_clusters=n_components)

        distance = True

        self.cluster_model = clust_info['model']
        self.n_components = clust_info['n_components']
        self.components = clust_info['components']
        self.labels = clust_info['labels']
        self.centers = clust_info['centers']
        if distance:
            dists = cdist(Z, self.centers)
            scores = np.take_along_axis(dists, self.labels[:, None], axis=1)
        else:
            probas = clust_info['scores']
            scores = np.take_along_axis(probas, self.labels[:, None], axis=1)
        self.id_to_score = {
            ind: scores[ind]
            for ind, _ in self.reversed_node_map.items()
        }
        self.spread = clust_info['spread']

        self.graph = nx.Graph()

        # don't keep clusters that are too sparse or not populated enough
        # keep_clusts = cluster_filter(clusts, cov, self.min_count, self.max_var)
        # keep_clusts = set(keep_clusts)
        # print(f">>> keeping {len(self.clusts)} clusters")
        for id_clust in self.components:
            self.graph.add_node(id_clust, node_ids=set())

        # Here there needs to be a modification to avoid putting wrong nodes
        for index, clust in enumerate(self.labels):
            if clust in list(set(self.labels)):
                nc_id = list_ids[index]
                self.graph.nodes[clust]['node_ids'].add(nc_id)

        id_to_clust = dict(zip(list_ids, self.labels))

        # BUILD MEDGES
        for sid, tid, distance in nc_edges:
            # Filter out the nodes that link a cluster that got removed
            start_clust, end_clust = id_to_clust[sid], id_to_clust[tid]
            if start_clust in self.graph and end_clust in self.graph:
                if not self.graph.has_edge(start_clust, end_clust):
                    self.graph.add_edge(start_clust, end_clust, edge_set=set())
                self.graph.edges[(start_clust, end_clust)]['edge_set'].add(
                    (sid, tid, distance))

        # Filtering and hashing
        to_remove = list()
        for start, end, edge_set in self.graph.edges(data='edge_set'):
            # remove from adjacency
            if len(edge_set) < self.min_edge:
                to_remove.append((start, end))
        for start, end in to_remove:
            self.graph.remove_edge(start, end)
Example #56
0
    trip_start_node_id = np.random.choice(node_id_list)
    trip_target_node_id = np.random.choice(node_id_list)
    while trip_start_node_id == trip_target_node_id:
        trip_target_node_id = np.random.choice(node_id_list)

    trip_has_path = has_path(road_network, trip_start_node_id, trip_target_node_id)
    while trip_has_path is False:
        print('we have a dud path, TRIP id: %i, nodes: %i, %i' % (passenger_trip_id, trip_start_node_id, trip_target_node_id))
        trip_target_node_id = np.random.choice(node_id_list)
        trip_start_node_id = np.random.choice(node_id_list)
        trip_has_path = has_path(road_network, trip_start_node_id, trip_target_node_id)

    if trip_has_path is True:

        passenger_trip_waypoints = shortest_path(road_network, trip_start_node_id, trip_target_node_id, weight='length')

        passenger_trip_start_pos = [node_longitude_dict[trip_start_node_id],node_latitude_dict[trip_start_node_id]]
        passenger_trip_destination_pos = [node_longitude_dict[trip_target_node_id],node_latitude_dict[trip_target_node_id]]
        
        start_passenger_trip_longitude_array[passenger_trip_id] = node_longitude_dict[trip_start_node_id]
        start_passenger_trip_latitude_array[passenger_trip_id] = node_latitude_dict[trip_start_node_id]

        passenger_trip_dict[passenger_trip_id] = {'start_pos':passenger_trip_start_pos, 'start_node':trip_start_node_id, 'dest_pos':passenger_trip_destination_pos, 'dest_node':trip_target_node_id, 'route':passenger_trip_waypoints, 'route_len':len(passenger_trip_waypoints)}

### save passenger trip data
with open((data_file_path+('%s_passenger_trip_data_dict_%s.pickle' % (CITY_NAME, SIM_RUN_DATE))), 'wb') as handle:
    pickle.dump(passenger_trip_dict, handle, protocol = pickle.HIGHEST_PROTOCOL)

Example #57
0
    def predict_edges(self, edge_list="soundcloud_edge_list.txt"):
        original_network = nx.read_edgelist(edge_list)
        scores = [[] for i in range(3)]
        ys = [[] for i in range(3)]
        degree_product_score = {}
        common_neighbor_score = {}
        geodesic_path_score = {}
        if len(self.network.degree().values()) != 0:
            max_degree = max(self.network.degree().values())
            min_degree = min(self.network.degree().values())
        else:
            max_degree = 0
            min_degree = 0

        # Cycle through each of the nodes in the graph
        for x, i in enumerate(self.network.nodes()):
            # Calculate the degree of the current node i
            i_degree = self.network.degree(i)
            # Find the set of neighbors for i (listed so it can be reused below)
            i_neighbors = list(self.network.neighbors(i))
            # Cycle through all of the possible connections each node could have
            for j in self.network.nodes():
                # Cannot connect to itself
                if i != j:
                    # If an edge doesn't exist
                    if not (i, j) in self.network.edges():
                        # Calculate the degree of the current node j
                        j_degree = self.network.degree(j)
                        # Find the set of neighbors for j (listed so it can be reused below)
                        j_neighbors = list(self.network.neighbors(j))

                        # Find the degree product score
                        if max_degree and max_degree - min_degree:
                            degree_product_score[
                                i, j] = (i_degree * j_degree - min_degree) / (
                                    max_degree * max_degree - min_degree)
                        else:
                            degree_product_score[i, j] = 0
                        scores[0].append(degree_product_score[i, j])

                        # Find the normalized common neighbor score
                        if (len(list(set(i_neighbors) | set(j_neighbors)))):
                            common_neighbor_score[i, j] = (len(
                                list(set(i_neighbors) & set(j_neighbors))
                            )) / (len(
                                list(set(i_neighbors) | set(j_neighbors))))
                            scores[1].append(common_neighbor_score[i, j])
                        else:
                            scores[1].append(random.random() /
                                             len(self.network.nodes()))

                        # Find the shortest path between nodes i and j and compute the score
                        try:
                            geodesic_path_score[i, j] = 1 / (
                                len(nx.shortest_path(self.network, i, j)) - 1)
                            scores[2].append(geodesic_path_score[i, j])
                        except nx.NetworkXNoPath:
                            scores[2].append(random.random() /
                                             len(self.network.nodes()))

                        # Compute the true y's for the AUC function
                        if original_network.has_edge(i, j):
                            ys[0].append(1)
                            ys[1].append(1)
                            ys[2].append(1)
                            # print("Degree score: ", scores[0][-1])
                            # print("Common Neighbors score: ", scores[1][-1])
                            # print("Shortest path score: ", scores[2][-1])
                        else:
                            ys[0].append(0)
                            ys[1].append(0)
                            ys[2].append(0)

        # Calculate the AUC for each of the heuristics
        fpr = [[] for i in range(3)]
        tpr = [[] for i in range(3)]
        accs = [[] for i in range(3)]
        for i in range(3):
            if len(scores[i]) != 0 and len(ys[i]) != 0:
                zipped = zip(scores[i], ys[i])
                sorted_zipped = sorted(zipped)
                new_scores = [score[0] for score in sorted_zipped]
                new_ys = [y[1] for y in sorted_zipped]
                fpr[i], tpr[i], thresholds = metrics.roc_curve(new_ys,
                                                               new_scores,
                                                               pos_label=1)
                accs[i] = metrics.auc(fpr[i], tpr[i])
            else:
                accs[i] = 0

        print("Accs: ", accs)
        return accs[0], accs[1], accs[2]
Example #58
0
 def shortest_path(self, node):
     return nx.shortest_path(self.tree,
                             source=self.start,
                             target=tuple(node))
Example #59
0
                    G.add_edge((x, y), (x, y-1))

            elif char.isalpha():  # check if portal already labeled
                add_portal(x, y)

    # add the portal edges to the graph
    for name in portals:
        if len(portals[name]) == 2:  # not start or finish
            G.add_edge(portals[name][0], portals[name][1])

    # save start and finish points
    start = portals['AA'][0]
    finish = portals['ZZ'][0]

    # use nx shortest_path to find between start and finish
    shortest_path = nx.shortest_path(G, start, finish)
    print("PART 1")
    print("Path: ", shortest_path)
    print("Length: ", len(shortest_path) - 1)  # node count, subtract one for start

# part 2
else:
    G = nx.Graph()
    portals = {}  # key will be name, will have list of two children for ends of portal

    # populate maze - same as part 1 but with addition of layers
    for y, row in enumerate(maze):
        for x, char in enumerate(row):
            for layer in range(35):  # create 35 layers of nodes - each node has extra layer
                if char == '.':
                    G.add_node((x, y, layer))
Example #60
0
def beam_search_with_heuristics(model,
                                orig_item,
                                preproc_item,
                                beam_size,
                                max_steps,
                                from_cond=True):
    """
    Find the valid FROM clause with beam search
    """
    inference_state, next_choices = model.begin_inference(
        orig_item, preproc_item)
    beam = [Hypothesis4Filtering(inference_state, next_choices)]

    cached_finished_seqs = []  # cache filtered trajectories
    beam_prefix = beam
    while True:
        # search prefixes with beam search
        prefixes2fill_from = []
        for step in range(max_steps):
            if len(prefixes2fill_from) >= beam_size:
                break

            candidates = []
            for hyp in beam_prefix:
                # print(hyp.inference_state.cur_item.state, hyp.inference_state.cur_item.node_type )
                if hyp.inference_state.cur_item.state == TreeTraversal.State.CHILDREN_APPLY \
                        and hyp.inference_state.cur_item.node_type == "from":
                    prefixes2fill_from.append(hyp)
                else:
                    candidates += [(hyp, choice, choice_score.item(),
                                    hyp.score + choice_score.item())
                                   for choice, choice_score in hyp.next_choices
                                   ]
            candidates.sort(key=operator.itemgetter(3), reverse=True)
            candidates = candidates[:beam_size - len(prefixes2fill_from)]

            # Create the new hypotheses from the expansions
            beam_prefix = []
            for hyp, choice, choice_score, cum_score in candidates:
                inference_state = hyp.inference_state.clone()

                # cache column choice
                column_history = hyp.column_history[:]
                if hyp.inference_state.cur_item.state == TreeTraversal.State.POINTER_APPLY and \
                        hyp.inference_state.cur_item.node_type == "column":
                    column_history = column_history + [choice]

                next_choices = inference_state.step(choice)
                assert next_choices is not None
                beam_prefix.append(
                    Hypothesis4Filtering(inference_state, next_choices,
                                         cum_score,
                                         hyp.choice_history + [choice],
                                         hyp.score_history + [choice_score],
                                         column_history))

        prefixes2fill_from.sort(key=operator.attrgetter('score'), reverse=True)
        # assert len(prefixes) == beam_size

        # enumerating FROM-clause completions for each prefix
        beam_from = prefixes2fill_from
        max_size = 6
        unfiltered_finished = []
        prefixes_unfinished = []
        for step in range(max_steps):
            if len(unfiltered_finished) + len(prefixes_unfinished) > max_size:
                break

            candidates = []
            for hyp in beam_from:
                if step > 0 and hyp.inference_state.cur_item.state == TreeTraversal.State.CHILDREN_APPLY \
                        and hyp.inference_state.cur_item.node_type == "from":
                    prefixes_unfinished.append(hyp)
                else:
                    candidates += [(hyp, choice, choice_score.item(),
                                    hyp.score + choice_score.item())
                                   for choice, choice_score in hyp.next_choices
                                   ]
            candidates.sort(key=operator.itemgetter(3), reverse=True)
            candidates = candidates[:max_size - len(prefixes_unfinished)]

            beam_from = []
            for hyp, choice, choice_score, cum_score in candidates:
                inference_state = hyp.inference_state.clone()

                # cache table choice
                table_history = hyp.table_history[:]
                key_column_history = hyp.key_column_history[:]
                if hyp.inference_state.cur_item.state == TreeTraversal.State.POINTER_APPLY:
                    if hyp.inference_state.cur_item.node_type == "table":
                        table_history = table_history + [choice]
                    elif hyp.inference_state.cur_item.node_type == "column":
                        key_column_history = key_column_history + [choice]

                next_choices = inference_state.step(choice)
                if next_choices is None:
                    unfiltered_finished.append(
                        Hypothesis4Filtering(
                            inference_state, None, cum_score,
                            hyp.choice_history + [choice],
                            hyp.score_history + [choice_score],
                            hyp.column_history, table_history,
                            key_column_history))
                else:
                    beam_from.append(
                        Hypothesis4Filtering(
                            inference_state, next_choices, cum_score,
                            hyp.choice_history + [choice],
                            hyp.score_history + [choice_score],
                            hyp.column_history, table_history,
                            key_column_history))

        unfiltered_finished.sort(key=operator.attrgetter('score'),
                                 reverse=True)

        # filtering: keep only FROM clauses consistent with the schema
        filtered_finished = []
        for hyp in unfiltered_finished:
            mentioned_column_ids = set(hyp.column_history)
            mentioned_key_column_ids = set(hyp.key_column_history)
            mentioned_table_ids = set(hyp.table_history)

            # reject hypotheses that mention the same table twice
            if len(mentioned_table_ids) != len(hyp.table_history):
                continue

            # the foreign keys should be used correctly: walk a shortest join
            # path between the mentioned tables and require exactly the key
            # columns that path needs
            # NOTE: the new version does not predict conditions in the FROM clause anymore
            if from_cond:
                covered_tables = set()
                must_include_key_columns = set()
                candidate_table_ids = sorted(mentioned_table_ids)
                start_table_id = candidate_table_ids[0]
                for table_id in candidate_table_ids[1:]:
                    if table_id in covered_tables:
                        continue
                    try:
                        path = nx.shortest_path(
                            orig_item.schema.foreign_key_graph,
                            source=start_table_id,
                            target=table_id)
                    except (nx.NetworkXNoPath, nx.NodeNotFound):
                        covered_tables.add(table_id)
                        continue

                    for source_table_id, target_table_id in zip(
                            path, path[1:]):
                        if target_table_id in covered_tables:
                            continue
                        if target_table_id not in mentioned_table_ids:
                            continue
                        col1, col2 = orig_item.schema.foreign_key_graph[
                            source_table_id][target_table_id]['columns']
                        must_include_key_columns.add(col1)
                        must_include_key_columns.add(col2)
                if must_include_key_columns != mentioned_key_column_ids:
                    continue

            # tables whose columns are mentioned should also exist
            must_table_ids = set()
            for col in mentioned_column_ids:
                tab_ = orig_item.schema.columns[col].table
                if tab_ is not None:
                    must_table_ids.add(tab_.id)
            if not must_table_ids.issubset(mentioned_table_ids):
                continue

            filtered_finished.append(hyp)

        filtered_finished.sort(key=operator.attrgetter('score'), reverse=True)
        # filtered.sort(key=lambda x: x.score / len(x.choice_history), reverse=True)
        prefixes_unfinished.sort(key=operator.attrgetter('score'),
                                 reverse=True)
        # new_prefixes.sort(key=lambda x: x.score / len(x.choice_history), reverse=True)

        prefixes_, filtered_ = merge_beams(prefixes_unfinished,
                                           filtered_finished, beam_size)

        if filtered_:
            cached_finished_seqs = cached_finished_seqs + filtered_
            cached_finished_seqs.sort(key=operator.attrgetter('score'),
                                      reverse=True)

        if prefixes_ and len(prefixes_[0].choice_history) < 200:
            beam_prefix = prefixes_
            for hyp in beam_prefix:
                hyp.table_history = []
                hyp.column_history = []
                hyp.key_column_history = []
        elif cached_finished_seqs:
            return cached_finished_seqs[:beam_size]
        else:
            return unfiltered_finished[:beam_size]
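
The schema filter above hinges on a foreign-key graph whose edges carry a
'columns' attribute naming the joinable column pair. A self-contained sketch
of that pattern, with a three-table schema invented purely for illustration:

import networkx as nx

# Toy foreign-key graph: nodes are table ids; each edge records the column
# pair that implements the join, mirroring the 'columns' attribute read in
# the filter above. The schema is made up.
fk = nx.Graph()
fk.add_edge(0, 1, columns=(3, 7))   # e.g. orders.customer_id = customers.id
fk.add_edge(1, 2, columns=(8, 12))  # e.g. customers.city_id = cities.id

must_include = set()
try:
    # joining table 0 with table 2 must route through table 1
    path = nx.shortest_path(fk, source=0, target=2)
except (nx.NetworkXNoPath, nx.NodeNotFound):
    path = []
for a, b in zip(path, path[1:]):
    must_include.update(fk[a][b]['columns'])
print(must_include)  # {3, 7, 8, 12}: every key column the join path needs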