Example #1
    def setUp(self):

        self.T1 = self.graph()

        self.T2 = self.graph()
        self.T2.add_node(1)

        self.T3 = self.graph()
        self.T3.add_nodes_from(range(5))
        edges = [(i,i+1) for i in range(4)]
        self.T3.add_edges_from(edges)

        self.T5 = self.multigraph()
        self.T5.add_nodes_from(range(5))
        edges = [(i,i+1) for i in range(4)]
        self.T5.add_edges_from(edges)

        self.T6 = self.graph()
        self.T6.add_nodes_from([6,7])
        self.T6.add_edge(6,7)

        self.F1 = nx.compose(self.T6, self.T3)

        self.N4 = self.graph()
        self.N4.add_node(1)
        self.N4.add_edge(1,1)

        self.N5 = self.graph()
        self.N5.add_nodes_from(range(5))

        self.N6 = self.graph()
        self.N6.add_nodes_from(range(3))
        self.N6.add_edges_from([(0,1),(1,2),(2,0)])

        self.NF1 = nx.compose(self.T6,self.N6)
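
Note: this fixture resembles the NetworkX tree-recognition tests, where self.graph and self.multigraph are class attributes bound per test class. A minimal sketch of that harness (the class name is hypothetical):

import networkx as nx

class TestTreeRecognition(object):  # hypothetical name; pair with the setUp above
    graph = nx.Graph
    multigraph = nx.MultiGraph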
Example #2
def load_semantic_network():
	# load several files into a single nx graph
	S = load_file_to_graph("../ontologies/stuff_ontology.txt")
	A = load_file_to_graph("../ontologies/attribute_ontology.txt")
	C = load_file_to_graph("../ontologies/context_knowledge.txt")
	X = load_file_to_graph("../ontologies/activity_ontology.txt")
	
	G = nx.compose(S,A)
	G = nx.compose(G,C)
	G = nx.compose(G,X)
	return G
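
load_file_to_graph is not shown in this example. A minimal sketch of such a helper, assuming each ontology file holds one whitespace-separated "source relation target" triple per line (the file format is an assumption):

import networkx as nx

def load_file_to_graph(path):
    # Hypothetical reader: one "source relation target" triple per line.
    G = nx.DiGraph()
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 3:
                source, relation, target = parts
                G.add_edge(source, target, relation=relation)
    return G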
Example #3
    def outputGraph(self, filename, _filter=None):
        if filename not in self._graphTime:
            self._graphTime[filename] = 0

        if (time.time() - self._graphTime[filename]) > 10:
            # print 'pre-Update Graph: %s' % filename
            G = nx.DiGraph()
            plt.clf()
            for edge in self._edges:
                if _filter is None:
                    G.add_edge(edge[0], edge[1])
                elif _filter(edge[0]) or _filter(edge[1]):
                    G.add_edge(edge[0], edge[1])

            try:
                G1 = dfs_tree(G, u"0015")
                G2 = dfs_tree(G, u"0013")
                G3 = nx.compose(G1, G2)
                G4 = dfs_tree(G, u"0017")
                G = nx.compose(G3, G4)
            except:
                pass

            relabel = {}
            newToOld = {}
            for n in G.nodes():
                if n in self._berths and self._berths[n] is not None:
                    relabel[n] = self._berths[n]
                    newToOld[self._berths[n]] = n
            nx.relabel_nodes(G, relabel, False)

            colours = []
            for n in G.nodes():
                if n in newToOld:
                    n = newToOld[n]
                if n in self._berths and self._berths[n] is not None:
                    colours.append("r")
                else:
                    colours.append("g")

            pos = nx.graphviz_layout(G, prog="dot")

            nx.draw_networkx_nodes(G, pos, node_color=colours, node_shape="s", node_size=900)
            nx.draw_networkx_edges(G, pos)
            nx.draw_networkx_labels(G, pos)

            fig = matplotlib.pyplot.gcf()
            fig.set_size_inches(16.0, 25.0)
            plt.axis("off")

            self.outputFile(plt, filename)

            self._graphTime[filename] = time.time()
Example #4
def load_semantic_network():
	# load several files into a single nx graph
	filePath = os.path.dirname(os.path.abspath(__file__))
	S = load_file_to_graph(filePath +"/ontologies/stuff_ontology.txt")
	A = load_file_to_graph(filePath + "/ontologies/attribute_ontology.txt")
	C = load_file_to_graph(filePath + "/ontologies/context_knowledge.txt")
	X = load_file_to_graph(filePath + "/ontologies/activity_ontology.txt")
	
	G = nx.compose(S,A)
	G = nx.compose(G,C)
	G = nx.compose(G,X)
	return G
Example #5
def snowball_round(G,seeds,myspace=False):
    """Function takes a base graph, and a list of seeds
    and builds out the network data by accessing the
    Google SocialGraph API."""
    t0=time()
    if myspace:
        seeds=get_myspace_url(seeds)
    sb_data=[]
    for s in range(0,len(seeds)):
        s_sg=get_sg(seeds[s])
        new_ego,pen=create_egonet(s_sg) # Create ego net of seed
        # Compose new network data into old base graph
        for p in pen:
            sb_data.append(p)
        if s<1:
            sb_net=nx.compose(G,new_ego)
        else:
            sb_net=nx.compose(new_ego,sb_net)
        del new_ego
        if s==round(len(seeds)*0.2):
            # Simple progress output, useful for long jobs
            sb_net.name='20% complete'
            nx.info(sb_net)
            print 'AT: '+strftime('%m/%d/%Y, %H:%M:%S', gmtime())
            print ''
        if s==round(len(seeds)*0.4):
            sb_net.name='40% complete'
            nx.info(sb_net)
            print 'AT: '+strftime('%m/%d/%Y, %H:%M:%S', gmtime())
            print ''
        if s==round(len(seeds)*0.6):
            sb_net.name='60% complete'
            nx.info(sb_net)
            print 'AT: '+strftime('%m/%d/%Y, %H:%M:%S', gmtime())
            print ''
        if s==round(len(seeds)*0.8):
            sb_net.name='80% complete'
            nx.info(sb_net)
            print 'AT: '+strftime('%m/%d/%Y, %H:%M:%S', gmtime())
            print ''
        if s==len(seeds)-1:
            print 'NEW NETWORK COMPLETE!'
            print 'AT: '+strftime('%m/%d/%Y, %H:%M:%S', gmtime())
            sb_net.name=G.name+'--> '
    # Return newly discovered seeds
    sb_data=array(sb_data)
    sb_data=sb_data.flatten() # flatten() returns a new array; assign it back
    sb_data=unique(sb_data)
    nx.info(sb_net)
    return sb_net,sb_data
Example #6
def test_compose_multigraph():
    G = nx.MultiGraph()
    G.add_edge(1, 2, key=0)
    G.add_edge(1, 2, key=1)
    H = nx.MultiGraph()
    H.add_edge(3, 4, key=0)
    H.add_edge(3, 4, key=1)
    GH = nx.compose(G, H)
    assert_equal(set(GH), set(G) | set(H))
    assert_equal(set(GH.edges(keys=True)), set(G.edges(keys=True)) | set(H.edges(keys=True)))
    H.add_edge(1, 2, key=2)
    GH = nx.compose(G, H)
    assert_equal(set(GH), set(G) | set(H))
    assert_equal(set(GH.edges(keys=True)), set(G.edges(keys=True)) | set(H.edges(keys=True)))
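
These assertions only cover node and edge sets. On overlapping nodes or edges, nx.compose(G, H) keeps the attribute values from the second argument; a quick demonstration:

import networkx as nx

G = nx.Graph()
G.add_node(1, color="red")
H = nx.Graph()
H.add_node(1, color="blue")

GH = nx.compose(G, H)
print(GH.nodes[1]["color"])  # "blue": attributes from H win on overlap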
Example #7
def main():
    '''
    This is the main function
    http://networkx.lanl.gov/reference/algorithms.operators.html
    '''    
    # Get distance matrices
    walking_times = read_weights_from_file(walking_time_filename)  
    shuttle_times = read_weights_from_file(shuttle_time_filename)
    shuttle_connection_times = read_weights_from_file(shuttle_connection_time_filename)
    outdoorness_matrix = read_weights_from_file(outdoorness_filename)
    #print outdoorness_matrix
    
    # Add penalties
    shuttle_connection_times = apply_penalty(shuttle_connection_times, shuttle_penalty/2, 'add') # /2 because we get in and out the shuttle, so we don't want to have a double penalty
    walking_times = apply_penalty(walking_times, walking_penalty , 'multiply') 
    walking_times = apply_outdoor_penalty(walking_times, outdoorness_matrix, outdoorness_penalty)
    
    # Create subgraphs
    walking_graph = nx.DiGraph(data=walking_times)
    #print G.edges(data=True)
    walking_graph = nx.relabel_nodes(walking_graph,convert_list_to_dict(read_node_labels(walking_time_filename)))    
    print 'walking_graph', walking_graph.edges(data=True)
    
    shuttle_graph = nx.DiGraph(data=shuttle_times)
    shuttle_graph = nx.relabel_nodes(shuttle_graph,convert_list_to_dict(read_node_labels(shuttle_time_filename)))
    print 'shuttle_graph', shuttle_graph.edges(data=True)
    
    shuttle_connection_graph = nx.DiGraph(data=shuttle_connection_times)
    shuttle_connection_graph = nx.relabel_nodes(shuttle_connection_graph,convert_list_to_dict(read_node_labels(shuttle_connection_time_filename)))
    print 'shuttle_connection_graph', shuttle_connection_graph.edges(data=True)
    
    # Create main graph
    main_graph = nx.compose(walking_graph, shuttle_graph)
    print 'main_graph', main_graph.edges(data=True)    
    main_graph = nx.compose(main_graph, shuttle_connection_graph)
    print 'main_graph', main_graph.edges(data=True)
    
    # Compute the shortest paths and path lengths between nodes in the graph.
    # http://networkx.lanl.gov/reference/algorithms.shortest_paths.html
    compute_shortest_path(main_graph, '32', 'NW86')
    compute_shortest_path(main_graph, 'W7', 'W20')
    compute_shortest_path(main_graph, '50', '35')
    #print nx.dijkstra_predecessor_and_distance(main_graph, 'NW86')
    
    # Compute shortest paths and lengths in a weighted graph G. TODO: Return farthest region.
    print nx.single_source_dijkstra(main_graph, '32', 'NW86')
    
    # Compute KSP (k-shortest paths) using https://github.com/Pent00/YenKSP
    yenksp_digraph = convert_nx_digraph_into_yenksp_digraph(main_graph)
    print ksp_yen(yenksp_digraph, 'NW86', '32', 2)
Example #8
    def graph(self):
        graph = nx.DiGraph()
        # Link all antecedents to me by decomposing
        #  FuzzyVariableTermAggregate down to just FuzzyVariableTerms
        for t in self.antecedent_terms:
            assert isinstance(t, FuzzyVariableTerm)
            graph.add_path([t, self])
            graph = nx.compose(graph, t.parent_variable.graph)

        # Link all consequents from me
        for c in self.consequent:
            assert isinstance(c, WeightedConsequent)
            graph.add_path([self, c.term])
            graph = nx.compose(graph, c.term.parent_variable.graph)
        return graph
Example #9
File: context.py  Project: MGM-KTH/kexbot
 def search(self,concept):
     global G, mentioned_concepts
     new_subgraph = nx.DiGraph()
     # ('ConceptuallyRelatedTo',1), ('AtLocation',2), ('DerivedFrom', 1)
     for relation, lim in [('IsA',3), ('PartOf', 2),('HasContext', 2)]:
         for node in mentioned_concepts:
             json_document = json.dumps(cnet.search(rel=relation, start=node, end=concept, limit=lim))
             decoder = json.JSONDecoder()
             json_obj = decoder.decode(json_document)
             new_subgraph = nx.compose(new_subgraph,self.parse_json_to_graph(json_obj, start=node, end=concept))
             json_document = json.dumps(cnet.search(rel=relation,start=concept, end=node, limit=lim))
             decoder = json.JSONDecoder()
             json_obj = decoder.decode(json_document)
             new_subgraph = nx.compose(new_subgraph, self.parse_json_to_graph(json_obj, start=concept, end=node))
     G = nx.compose(G,new_subgraph)    
Example #10
def construct_RankTh(fCorr, ffMRI):
    # a function to generate rank-based thresholding networks
    # some parameters
    Target_d = [3, 4, 5, 6, 8, 10, 15, 20, 30]
    # Output directory is relative to fCorr directory
    CorrDir, fCorrMat = os.path.split(fCorr)
    BaseDir, CorrDirName = os.path.split(CorrDir)
    OutBase = os.path.join(BaseDir, 'Adjlist')
    if not os.path.exists(OutBase):
        os.makedirs(OutBase)
    OutDir = os.path.join(OutBase, 'Network_RankTh')
    if not os.path.exists(OutDir):
        os.makedirs(OutDir)
    # loading the correlation matrix
    R, NodeInd = NetUtil.load_corrmat_sparse(fCorr, ffMRI)
    # loop for generating rank-th networks
    for iTh in range(len(Target_d)):
        print "Generating a network with threshold d=" + str(Target_d[iTh])
        # generating the network
        if iTh==0:
            G, trR = NetUtil.net_builder_RankTh(R, NodeInd, Target_d[iTh])
            R = [] # releasing the memory
        else:
            # generate only the difference from the previous threshold,
            # then combine the resulting graphs
            deltaG, trR = NetUtil.net_builder_RankTh(trR, NodeInd, 
                                                Target_d[iTh]-Target_d[iTh-1])
            G = nx.compose(G, deltaG)
        # saving the network
        fNetFile = "Network_d" + str(Target_d[iTh]) + ".adjlist"
        fNet = os.path.join(OutDir,fNetFile)
        nx.write_adjlist(G, fNet)
Example #11
 def dumpjson_graph(self):
     assert self.COMM.rank==0        
     import json
     import networkx as nx
     from networkx.readwrite import json_graph
     h=self.h
     #import pickle
     #json_graph.node_link_graph
     #Create a whole network of both transmitter types.
     self.global_whole_net=nx.compose(self.global_ecg, self.global_icg)
     self.global_whole_net.remove_nodes_from(nx.isolates(self.global_whole_net))
     self.global_icg.remove_nodes_from(nx.isolates(self.global_icg))
     self.global_ecg.remove_nodes_from(nx.isolates(self.global_ecg))
     
     d =[]
     whole=nx.to_numpy_matrix(self.global_whole_net)  
     #TODO sort whole (network) here in Python, as Python is arguably easier to understand than JS. 
     d.append(whole.tolist()) 
     #d.append(self.global_whole_net.tolist())
     #d.append(json_graph.node_link_data(self.global_whole_net))                 
     d.append(self.global_namedict)
     json.dump(d, open('web/js/global_whole_network.json','w'))
     d=json.load(open('web/js/global_whole_network.json','r'))
     # read the object back just to prove that it is readable.
     d=None #destroy the object.    
     print('Wrote JSON data to web/js/network.json')
 
     print('Wrote node-link JSON data to web/js/network.json')
Example #12
def jaccard(network1, network2, d="directed"):
    """Returns Jaccard similarity coefficient and
    distance of two different networks of the same
    sets of nodes.

    Parameters
    ----------
    network1 : first network edge list
    network2 : second network edge list
    d : directed or undirected
        type of graph

    Returns
    -------
    j : float
        Jaccard similarity coefficient
    jd : float
        Jaccard distance
    """
    if d == "directed":
        g1 = nx.read_weighted_edgelist(network1, create_using=nx.DiGraph())
        g2 = nx.read_weighted_edgelist(network2, create_using=nx.DiGraph())
    elif d == "undirected":
        g1 = nx.read_weighted_edgelist(network1)
        g2 = nx.read_weighted_edgelist(network2)

    union = nx.compose(g1, g2)
    inter = nx.intersection(g1, g2)

    j = float(inter.number_of_edges()) / float(union.number_of_edges())
    jd = 1 - j

    return j, jd
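
The same computation on two in-memory graphs, skipping the edge-list files (a sketch, not part of the original module; note nx.intersection expects both graphs to have the same node set):

import networkx as nx

g1 = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
g2 = nx.DiGraph([(1, 2), (3, 2)])          # same nodes {1, 2, 3}

union = nx.compose(g1, g2)                 # 4 distinct edges
inter = nx.intersection(g1, g2)            # only (1, 2) is shared

j = inter.number_of_edges() / union.number_of_edges()
print(j, 1 - j)                            # 0.25 0.75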
Example #13
def total_overlap(network1, network2, d="directed"):
    """Returns value of total overlap measure for
    two given networks of the same sets of nodes.

    Parameters
    ----------
    network1 : first network edge list
    network2 : second network edge list
    d : directed or undirected
        type of graph

    Returns
    -------
    t_overlap : float
    """
    if d == "directed":
        g1 = nx.read_weighted_edgelist(network1, create_using=nx.DiGraph())
        g2 = nx.read_weighted_edgelist(network2, create_using=nx.DiGraph())
    elif d == "undirected":
        g1 = nx.read_weighted_edgelist(network1)
        g2 = nx.read_weighted_edgelist(network2)

    overlap = 0
    for i in g1.edges():
        if g2.has_edge(i[0], i[1]):
            overlap += 1

    t_overlap = (float(overlap) / float(nx.compose(g1, g2).number_of_edges()))

    return t_overlap
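
For intuition: with g1 edges {(1, 2), (2, 3)} and g2 edges {(1, 2), (3, 1)}, exactly one edge of g1 appears in g2 and the composed graph has three distinct edges, so t_overlap = 1/3. Since both the shared-edge count and the size of the edge union are symmetric in g1 and g2, this unweighted form coincides with the Jaccard coefficient of the previous example.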
Example #14
def show_nca(name1, name2, levels=0):
	nca_list=nca(name1, name2)
	G=json_graph.load(open("static/local_instance.json"))
	H=nx.DiGraph()
	for each in nca_list:
		anc_path=nx.compose(color_path(each[0],'green'),color_path(each[1],'yellow'))
		H=nx.compose(H,anc_path)

	for i in range(levels):
		H=expand_graph(H)

	for each in nca_list:
		H.node[each[0][0]]['color']='red' # color the NCA differently

	data=json_graph.dumps(H)
	return data
Example #15
    def _process_rules(self, rules):
        # Recursive function to process rules in the correct firing order
        len_rules = len(rules)
        skipped_rules = []
        while len(rules) > 0:
            rule = rules.pop(0)
            if self._can_calc_rule(rule):
                yield rule
                # Add rule to calced graph
                self.calced_graph = nx.compose(self.calced_graph, rule.graph)
            else:
                # We have not calculated the predecessors for this rule yet.
                #  Skip it for now
                skipped_rules.append(rule)

        if len(skipped_rules) == 0:
            # All done!
            return  # end the generator; raising StopIteration here is deprecated (PEP 479)
        else:
            if len(skipped_rules) == len_rules:
                # Avoid being caught in an infinite loop
                raise RuntimeError("Unable to resolve rule execution order. "
                                   "The most likely reason is two or more "
                                   "rules that depend on each other.\n"
                                   "Please check the rule graph for loops.")
            else:
                # Recurse across the skipped rules
                for r in self._process_rules(skipped_rules):
                    yield r
Example #16
def createMergedGraph(groupSampleDict, processedDataDir, rawModelDir):

    print 'Merging genomes from specified taxonomic group'

    # Loop over the keys of the dictionary, one for each group
    for group in groupSampleDict:

        # Create an empty graph object
        mergedGraph = nx.DiGraph()

        # Read in the graph of each sample in the group and merge it with the
        # graph from the previous iteration
        for sample in groupSampleDict[group]:

            # Read in adjacency list and convert to digraph object
            myDiGraph = nx.read_adjlist(rawModelDir+'/'+sample+'/'+sample+'AdjList.txt',
                                create_using=nx.DiGraph())

            # Append to the previous graph
            mergedGraph = nx.compose(mergedGraph, myDiGraph)

        # Check that the proper output directory exists. If not, create it.
        if not os.path.exists(processedDataDir+'/'+group):
            os.makedirs(processedDataDir+'/'+group)

        nx.write_adjlist(mergedGraph, processedDataDir+'/'+group+'/'+group+'AdjList.txt')
        nx.write_graphml(mergedGraph, processedDataDir+'/'+group+'/'+group+'Graph.xml')

    return
Example #17
def combine_graph(Gs, opts):
    # combine the graphs in the list Gs
    G = Gs[0]
    for i in range(1,len(Gs)):
        G = nx.compose(G, Gs[i])

    # position of each nodes
    pos = nx.spring_layout(G) # Fruchterman-Reingold force-directed algorithm.

    # plot nodes and edges separately
    for i in range(len(Gs)):
        nodes = Gs[i].nodes()
        edges = Gs[i].edges()
        opt = opts[i]
        nx.draw_networkx_nodes(G, pos, nodelist = nodes, 
                               node_color = opt['node_color'], 
                               alpha = float(opt['node_alpha']), 
                               node_size = int(opt['node_size']))
        nx.draw_networkx_edges(G, pos, edgelist = edges, 
                               edge_color = opt['edge_color'], 
                               alpha = float(opt['edge_alpha']), 
                               width = int(opt['edge_width']))
    # label the nodes
    nx.draw_networkx_labels(G, pos)
    plt.show()
Example #18
def compose_all(graphs, name=None):
    """Return the composition of all graphs.

    Composition is the simple union of the node sets and edge sets.
    The node sets of the supplied graphs need not be disjoint.

    Parameters
    ----------
    graphs : list
       List of NetworkX graphs

    name : string
       Specify name for new graph

    Returns
    -------
    C : A graph with the same type as the first graph in list

    Notes
    -----
    It is recommended that the supplied graphs be either all directed or all
    undirected.

    Graph, edge, and node attributes are propagated to the union graph.
    If a graph attribute is present in multiple graphs, then the value
    from the last graph in the list with that attribute is used.
    """
    graphs = iter(graphs)
    C = next(graphs)
    for H in graphs:
        C = nx.compose(C, H, name=name)
    return C
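
A usage sketch; in NetworkX 2.0 and later the same helper ships as nx.compose_all, minus the name keyword:

import networkx as nx

G1 = nx.path_graph(3)             # nodes 0-1-2
G2 = nx.path_graph(range(2, 5))   # nodes 2-3-4

C = nx.compose_all([G1, G2])
print(sorted(C))                  # [0, 1, 2, 3, 4]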
Example #19
File: all.py  Project: pengyu/networkx
def compose_all(graphs, name=None):
    """Return a new graph, the composition of supplied graphs

    Composition is the simple union of the node sets and edge sets.
    The node sets of the supplied graphs need not be disjoint.

    Parameters
    ----------
    graphs : list of graphs
       Multiple NetworkX graphs

    name : string
       Specify name for new graph

    Returns
    -------
    R : A new graph with the same type as the first graph

    Notes
    -----
    A new graph is returned, of the same class as the first graph in the list.
    It is recommended that the supplied graphs be either all directed or all
    undirected.  If a graph attribute is present in multiple graphs,
    then the value from the last graph in the list with that attribute
    is used.
    """
    C = graphs.pop(0)
    for H in graphs:
        C = nx.compose(C, H, name=name)
    return C
Example #20
    def recomputeAllSourcesDag(self, all_dag, new_ridx_dag):
        """
        Given the initial all_routers_dag, and the new chosen ridxDag, we compute
        the newly created all_routers_dag merging the previous one while forcing the
        new ridxDag.
        """
        # Add 'flag' in new ridx dag
        edges = new_ridx_dag.edges()
        ridx_dag = nx.DiGraph()
        ridx_dag.add_edges_from(edges, flag=True)
        
        # Compose it with all_dag
        new_adag = nx.compose(all_dag, ridx_dag)

        # Iterate new ridx nodes. Remove those outgoing edges from the same node 
        # in all_dag that do not have 'flag'.
        final_all_dag = new_adag.copy()

        # Get edges to remove
        edges_to_remove = [(x, y) for node in new_ridx_dag.nodes() for
                           (x, y, data) in new_adag.edges(data=True)
                           if node == x and not data.get('flag')]

        # Remove them
        final_all_dag.remove_edges_from(edges_to_remove)
        
        # Return modified all_dag
        return final_all_dag
Example #21
    def get_underlying_tree(self, connected_component):
        # Find the root (color with only one occurrence)
        root = None
        colors = [self.coloring[node] for node in connected_component.nodes()]
        for index, color in enumerate(colors):
            colors[index] = 'Not a color'
            if color not in colors:
                root = connected_component.nodes()[index]
                break
            colors[index] = color

        # If we can't find a root, something's wrong!
        if root is None:
            print 'WARNING: this coloring has no root', colors
            return connected_component

        # Create a new NetworkX graph to represent the tree
        tree = nx.Graph()
        tree.add_node(root)

        # Remove the root from the connected component
        connected_component = nx.Graph(connected_component)
        connected_component.remove_node(root)

        # Every new connected component is a subtree
        for sub_cc in nx.connected_component_subgraphs(connected_component):
            subtree = self.get_underlying_tree(sub_cc)
            tree = nx.compose(tree, subtree)
            tree.add_edge(root, subtree.root)

        # Root field for use in recursive case to connect tree and subtree
        tree.root = root
        return tree
Example #22
    def add(self, concept):
        """
        Simple adder method.

        :param concept: Concept to be added to the model.
        """
        self.graph = nx.compose(self.graph, ConceptModel([concept]).graph)
Example #23
 def add_edge(self, u, v):
     print('[euler_tour_forest] add_edge(%r, %r)' % (u, v))
     if self.has_edge(u, v):
         return
     ru = self.find_root(u)
     rv = self.find_root(v)
     if ru is None:
         self.add_node(u)
         ru = u
     if rv is None:
         self.add_node(v)
         rv = v
     assert ru is not rv, (
         'u=%r, v=%r not disjoint, can only join disjoint edges' % (u, v))
     assert ru in self.trees, 'ru must be a root node'
     assert rv in self.trees, 'rv must be a root node'
     subtree1 = self.trees[ru]
     subtree2 = self.trees[rv]
     del self.trees[rv]
     new_tree = nx.compose(subtree1, subtree2)
     new_tree.add_edge(u, v)
     self.trees[ru] = new_tree
     print(list(new_tree.nodes()))
     assert nx.is_connected(new_tree)
     assert nx.is_tree(new_tree)
Example #24
def msp_associated_graph(BG,N,projection_layer=0):
    bg_graph = nx.Graph()
    for i in range(projection_layer*N,(projection_layer+1)*N):
        nei = list(BG.neighbors(i))  # list() so len() also works with NetworkX 2.x iterators
        k = nx.relabel_nodes(nx.complete_graph(len(nei)),dict(zip(range(len(nei)), nei)))
        bg_graph = nx.compose(bg_graph,k)
    return bg_graph
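
The loop above amounts to a one-mode projection of the bipartite graph BG: nodes outside the projection layer become connected whenever they share a neighbor inside it. NetworkX has a built-in with the same effect, shown on a toy graph (the layer numbering convention here is assumed):

import networkx as nx
from networkx.algorithms import bipartite

BG = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 4)])  # layer 0: {0, 1, 2}; layer 1: {3, 4}

proj = bipartite.projected_graph(BG, [3, 4])
print(sorted(proj.edges()))  # [(3, 4)]: 3 and 4 share neighbor 1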
Example #25
def graph_from_set(df):
    """
    The co-occurrence graph for a set of tweets is the composition of the individual
    complete k-graphs, for each tweet. In other words, each tweet in the set forms
    a k-clique of the composed graph, and cliques are connected when distinct tweets
    have at least one hashtag in common.

    Each already existing node/hashtag that is seen in a new tweet will take on the
    timestamp of the new tweet containing it.

    Parameters
    ----------
    df : Pandas DataFrame
        to extract tweet info from, usually within a time window.

    Return
    ------
    G : NetworkX Graph
        composition graph on all tweets' hashtags in df
    """

    G = nx.Graph(time=df.time.max())  # initialize empty graph with latest timestamp

    for i in df.itertuples():
        tw_no, tags, time = i

        if len(tags) < 2:  # skip tweets with no hashtag co-occurrence
            continue
        H = graph_from_tweet(df, tw_no)  # current tweet's complete k-graph
        G = nx.compose(G, H)  # add new edges and nodes found in H not already in G
    return G
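
graph_from_tweet belongs to the surrounding project and is not shown. A simplified stand-in for the composition step, building each tweet's hashtag clique with nx.complete_graph (the tuple layout of tweets is an assumption):

import networkx as nx

tweets = [(["#a", "#b", "#c"], 1), (["#b", "#d"], 2)]  # (hashtags, timestamp)

G = nx.Graph()
for tags, t in tweets:
    H = nx.complete_graph(tags)          # clique over this tweet's hashtags
    nx.set_node_attributes(H, t, "time")
    G = nx.compose(G, H)                 # shared nodes take the newer timestamp

print(G.nodes["#b"]["time"])  # 2
print(sorted(G.edges()))      # [('#a', '#b'), ('#a', '#c'), ('#b', '#c'), ('#b', '#d')]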
Example #26
    def update_graph(self, list_solutions):
        
        ''' update_graph
            Parameters:
            list_solutions: a list of paths returned by different ants.
            A path is a list of NODES (not node indexes).
            Given a list of solutions (list of paths), updates the global graph.

            Return:
            Returns a list of graphs, one per solution.
        '''
        list_graphs_return = list()
        
        for solution in list_solutions:
            # Example solution (abridged): a list of
            # (node_id, {'node': <aconode.ACONode instance>, ...}) tuples.
            #print ("Solution "+ str(solution))
            g = nx.Graph()
            g.add_nodes_from(solution)
            #print("graph generated: "+ str(g.node))
            g.add_path([s[0] for s in solution])
            
            self.graph = nx.compose(self.graph, g) # self.graph edges have preference over g
            list_graphs_return.append(g)
        
            #print ("Update graph len nodes(self.graph): " + str(len(self.graph.nodes())))
            #print ("Update graph len edges(self.graph): " + str(len(self.graph.edges())))
            
            i = 0
            for l in list_graphs_return:
                #print ("Update graph len nodes ("+str(i) + "): " + str(len(l.nodes())))
                #print ("Update graph len edges ("+str(i) + "): " + str(len(l.edges())))
                i += 1

        return list_graphs_return
Example #27
File: STWalk1.py  Project: jblupus/STWalk
def createSpaceTimeGraph(G_list, time_window, start_node, time_step):
    """
     time step is necessary because we want representation only for last time step and
     we will create the space-time graph for [time_step, time_step-1,time_step-2,...,time_step-time_window]
    """
    G = G_list[-1]
    for time1 in range(1, time_window + 1):
        past_node = start_node.split("_")[0] + "_" + str(time_step - time1)
        if past_node not in G_list:
            continue
        else:
            G.add_edge(start_node, past_node)

            G_past = G_list[-time1 - 1]

            # considering first level neighbors
            past_neighbors = list(G_past.neighbors(past_node))
            temp = []

            # considering second level neighbors
            for elt in past_neighbors:
                temp = temp + list(G_past.neighbors(elt))

            # merging list of level-1 and level-2 neighbors
            past_neighbors = past_neighbors + temp
            past_neighbors.append(past_node)

            # subgraph of G_past containing nodes from "past_neighbors" and edges between those nodes.
            past_subgraph = G_past.subgraph(past_neighbors)

            # merge current graph with past subgraphs
            G = nx.compose(G, past_subgraph)
            start_node = past_node
    return G
Example #28
    def full_diff(
       cls,
       graph1,
       graph2,
       node_equal=lambda g1, g2: lambda x, y: True,
       edge_equal=lambda g1, g2: lambda x, y: True
    ):
        """
        Return a graph that shows the full difference between graph1 and graph2.

        :param `DiGraph` graph1: a graph
        :param `DiGraph` graph2: a graph
        :param node_equal: a function that determines if two nodes are equal
        :type node_equal: `DiGraph` * `DiGraph` -> node * node -> bool
        :param edge_equal: a function that determines if two edges are equal
        :type edge_equal: `DiGraph` * `DiGraph` -> edge * edge -> bool
        :returns: an annotated graph composed of ``graph1`` and ``graph2``
        :rtype: `DiGraph`
        """
        graph = nx.compose(graph1, graph2, name="union")

        (l_node_diff, r_node_diff) = cls.node_differences(
           graph1,
           graph2,
           node_equal
        )
        (l_edge_diff, r_edge_diff) = cls.edge_differences(
           graph1,
           graph2,
           edge_equal
        )

        removed = DifferenceMarkers.node_differences(
           graph,
           l_node_diff,
           DiffStatuses.REMOVED
        )
        Decorator.decorate_nodes(graph, removed)
        removed = DifferenceMarkers.edge_differences(
           graph,
           l_edge_diff,
           DiffStatuses.REMOVED
        )
        Decorator.decorate_edges(graph, removed)

        added = DifferenceMarkers.node_differences(
           graph,
           r_node_diff,
           DiffStatuses.ADDED
        )
        Decorator.decorate_nodes(graph, added)
        added = DifferenceMarkers.edge_differences(
           graph,
           r_edge_diff,
           DiffStatuses.ADDED
        )
        Decorator.decorate_edges(graph, added)

        return graph
Example #29
    def clone_subgraphs(self, g):
        if not isinstance(g, CGRContainer):
            raise InvalidData('only CGRContainer acceptable')

        r_group = []
        x_group = {}
        r_group_clones = []
        newcomponents = []

        ''' search bond breaks and creations
        '''
        components, lost_bonds, term_atoms = self.__split_graph(g)
        lost_map = {x: y for x, y in lost_bonds}
        ''' extract subgraphs and sort by group type (R or X)
        '''
        x_terminals = set(lost_map.values())
        r_terminals = set(lost_map)

        for i in components:
            x_terminal_atom = x_terminals.intersection(i)
            if x_terminal_atom:
                x_group[x_terminal_atom.pop()] = i
                continue

            r_terminal_atom = r_terminals.intersection(i)
            if r_terminal_atom:
                r_group.append([r_terminal_atom, i])
                continue

            newcomponents.append(i)
        ''' search similar R groups and patch.
        '''
        tmp = g
        for i in newcomponents:
            for k, j in r_group:
                gm = GraphMatcher(j, i, node_match=self.__node_match_products,
                                  edge_match=self.__edge_match_products)
                ''' search for similar R-groups started from bond breaks.
                '''
                mapping = next((x for x in gm.subgraph_isomorphisms_iter() if k.issubset(x) and
                                all(x[y] in term_atoms for y in k)), None)
                if mapping:
                    r_group_clones.append([k, mapping])
                    tmp = compose(tmp, self.__remap_group(j, tmp, mapping)[0])
                    break

        ''' add lost X groups to R groups
        '''
        for i, j in r_group_clones:
            for k in i:
                remappedgroup, mapping = self.__remap_group(x_group[lost_map[k]], tmp, {})
                tmp = CGRcore.union(tmp, remappedgroup)
                tmp.add_edge(j[k], mapping[lost_map[k]], s_bond=1, sp_bond=(1, None))

        if r_group_clones:
            tmp.meta.update(g.meta)
            return tmp

        return tmp.copy()
Example #30
def build_activity_graph(activity_uri, activity_id):
    G = nx.DiGraph()
    
    q_activity_to_resource = render_template('activity_to_resource.q', activity_uri = activity_uri)
    
    G = build_graph(G, activity_uri, "activity", "concept", q_activity_to_resource)
    
    q_resource_to_activity = render_template('resource_to_activity.q', activity_uri = activity_uri)
    
    G = build_graph(G, activity_uri, "concept", "activity", q_resource_to_activity)
    
    print activity_uri, activity_id
    
    # origin_node_id = "{}".format(activity_id.lower())
    origin_node_id = activity_id
    
    G.node[origin_node_id]['type'] = 'origin'
    
    names = {}
    for n, nd in G.nodes(data=True):
        if nd['type'] == 'activity' or nd['type'] == 'origin':
            label = nd['label'].replace('Activity','').upper()
            names[n] = label
        else:
            names[n] = nd['label']

    nx.set_node_attributes(G,'label', names)

    deg = nx.degree(G)
    nx.set_node_attributes(G,'degree',deg)

    outG = nx.ego_graph(G,origin_node_id,50)
    inG = nx.ego_graph(G.reverse(),origin_node_id,50)

    inG = inG.reverse()

    sG = nx.compose(outG,inG)

    assign_weights(sG, [])

    print sG.edges(data=True)

    g_json = json_graph.node_link_data(sG) # node-link format to serialize

    return g_json
Example #31
File: transit.py  Project: fxjung/MaaSSim
def prep_transit_graph(inData, params, calc_skim=True, plot=False):
    inData.skims = DotMap()
    inData.skims.dist = inData.skim.copy()
    inData.skims.ride = inData.skims.dist.divide(
        params.speeds.ride).astype(int)
    inData.skims.walk = inData.skims.dist.divide(
        params.speeds.walk).astype(int)

    # load GTFS graphs from file
    graphs = load_graphs(params.GTFS.cities.keys(),
                         params.GTFS.space_list,
                         params.GTFS.transfer_penalty,
                         folder_path=params.paths.dingGTFS)

    # G_L = graphs[params.GTFS.city]['L']
    G_P = graphs[params.GTFS.city]['P']
    skims = get_skims(
        G_P
    )  # create stop x stop skim matrices of ['GTC', "IVT", "WT", "TRANSFER", "NONIVT"]
    # assign nodes to stop points
    skims.pos['node'] = skims.pos.apply(
        lambda p: ox.get_nearest_node(inData.G, (p.y, p.x)), axis=1)
    inData.transit_stops = skims.pos.copy()

    # manipulate skims into a networkx adjacency matrix
    to_concat = list()
    attrs = ['GTC', "IVT", "WT", "TRANSFER", "NONIVT"]
    for field in attrs:
        skims[field].columns = skims[field].columns.astype(int)
        adj = skims[field].stack().to_frame()
        adj.columns = [field]
        to_concat.append(adj)
    adj = pd.concat(to_concat, axis=1)
    adj['s'] = adj.index.get_level_values(0)
    adj['t'] = adj.index.get_level_values(1)
    adj['cost'] = adj['IVT'] + params.GTFS.wait_penalty * adj['WT'] + adj[
        'TRANSFER']
    adj = adj.astype(int)
    adj['source'] = adj.apply(lambda x: skims.pos.loc[x.s].node, axis=1)
    adj['target'] = adj.apply(lambda x: skims.pos.loc[x.t].node, axis=1)

    # create transit graph (nodes are from inData.G - road graph nearest node)
    TG = nx.from_pandas_edgelist(adj,
                                 'source',
                                 'target',
                                 edge_attr=attrs,
                                 create_using=nx.MultiDiGraph)
    G = nx.compose(inData.G, TG)  # merge road graph and transit graph

    # get graph attributes back
    to_concat = list()
    for field in attrs + ['length']:
        to_concat.append(pd.Series(nx.get_edge_attributes(G, field)))
    df = pd.concat(to_concat, axis=1)
    df.columns = attrs + ['length']
    df['L'] = df.length.fillna(999999)  # infinite length for transit links
    df = df.fillna(0)  # fill walking links with empty values

    df['WALK_TIME'] = (df.length / params.speeds.walk).astype(int)
    df['TRANSIT_COST'] = df.IVT + df.WT * 2 + df.TRANSFER + df.WALK_TIME  # either transit times or walk times

    nx.set_edge_attributes(G, df.TRANSIT_COST.to_dict(), name='TRANSIT_COST')
    nx.set_edge_attributes(G, df.L.to_dict(), name='length')

    if plot:
        import seaborn as sns
        palette = sns.color_palette("muted")
        ev = [
            0.003 if 'IVT' in edge[-1].keys() else 0.3
            for edge in G.edges(data=True)
        ]
        colors = [
            'blue' if 'IVT' in edge[-1].keys() else 'grey'
            for edge in G.edges(data=True)
        ]
        fig, ax = ox.plot_graph(G,
                                fig_height=15,
                                fig_width=15,
                                node_size=0,
                                edge_linewidth=ev,
                                show=False,
                                close=False,
                                edge_color=colors)
        ax.scatter(skims.pos.x, skims.pos.y, s=3, c='blue', marker='x')
        o, d = inData.nodes.sample(1).squeeze().name, inData.nodes.sample(
            1).squeeze().name
        route = nx.shortest_path(G, o, d, weight='TRANSIT_COST')
        ax = add_route(G, ax, route, color=palette[2], alpha=1, key='length')
        route = nx.shortest_path(G, o, d, weight='length')
        ax = add_route(G, ax, route, color=palette[3], alpha=1, key='length')
    if calc_skim:
        skim_generator = nx.all_pairs_dijkstra_path_length(
            G, weight='TRANSIT_COST')
        skim_dict = dict(skim_generator)
        inData.skims.transit = pd.DataFrame(skim_dict).fillna(
            params.dist_threshold).T.astype(
                int)  # and dataframe is more intuitive
    inData.GTFS.G = G

    return inData
Example #32
for line in ldd_data:
    if len(line.split()) > 1:
        lib = line.split()[0]
        libs.append(lib)

callgraph = nx.DiGraph()
fn_to_node = dict()
node_to_fn = dict()
for lib in libs:
    gtmp = pgv.AGraph(sys.argv[2] + "/" + lib + ".bc.dot")
    for i in gtmp.nodes():
        fn_to_node[i.attr['label'][1:-1]] = i.name
        node_to_fn[i.name] = i.attr['label'][1:-1]
    graph = nx.DiGraph(gtmp)
    callgraph = nx.compose(callgraph, graph)  # compose returns a new graph, so no copy is needed

# nx.nx_agraph.write_dot(callgraph, "test.dot")

fn_to_capunsafe = dict()
with open(sys.argv[3]) as f:
    for line in f:
        if line.strip() in fn_to_node:
            callers = nx.algorithms.dag.ancestors(callgraph,
                                                  fn_to_node[line.strip()])

            for call in callers:
                fn = node_to_fn[call]
                if fn in fn_to_capunsafe:
                    fn_to_capunsafe[fn].append(line.strip())
                else:
                    fn_to_capunsafe[fn] = [line.strip()]
Example #33
def gen_graphs_batch(n_graphs,
                     n_motifs=20,
                     motif_sizes=[2, 3, 4],
                     prior=np.array([1, 1, 5, 7, 1, 1, 1, 7, 7]) / 31):
    """
	Returns a list of networkx graphs generated according to a distribution over motifs.

	Parameters
	----------
	n_graphs: int
		Number of graphs to generate.
	n_motifs: int
		Number of motifs to place in the graph.
	motif_size: lst
		A list of motif sizes, where all (non-degenerate) n-node motifs will be considered for the n's listed.
	prior: np.array
		An array that encodes the probability of drawing each unique motif. This is the length of the number of unique motifs. For motif_sizes=[2, 3, 4], the length should be 9.

	Returns
	-------
	graphs: lst
		List of networkx graphs.
	motifs: dic
		Dictionary of motifs used to generate the graphs.
	"""
    motifs_clust = {m: group_motifs(m) for m in motif_sizes}
    motif_tups = [(m, n) for m in motif_sizes
                  for n in range(len(motifs_clust[m]))]
    motifs = {
        m: n
        for m, n in zip(motif_tups,
                        [j for i in motifs_clust for j in motifs_clust[i]])
    }
    motif_colors = {
        m: n
        for m, n in zip(motif_tups, range(1,
                                          len(motif_tups) + 1))
    }
    graphs = []
    for i in range(n_graphs):
        G = nx.Graph()
        rand_motifs = [
            motif_tups[i] for i in np.random.choice(
                np.arange(0, len(motif_tups)), size=n_motifs, p=prior)
        ]
        for motif in rand_motifs:
            idx = np.random.randint(len(
                motifs[motif]))  #permutation is drawn from random uniform
            m = nx.from_numpy_matrix(motifs[motif][idx])
            orig_nodes = np.arange(motif[0])
            new_nodes = np.random.randint(len(G.nodes()) + motif[0] + 1,
                                          size=motif[0])
            m = nx.relabel_nodes(m,
                                 {x: y
                                  for x, y in zip(orig_nodes, new_nodes)})
            nx.set_node_attributes(m, 'motifs', [motif])
            nx.set_edge_attributes(m, 'motif', motif)
            # nx.set_node_attributes(m, 'neighbors', [tuple(new_nodes)])
            nx.set_node_attributes(m, 'color', motif_colors[motif])
            nx.set_edge_attributes(m, 'color', motif_colors[motif])
            for j in m.nodes():
                if j in G.nodes():
                    nx.set_node_attributes(
                        m, 'motifs', {
                            j:
                            nx.get_node_attributes(m, 'motifs')[j] +
                            nx.get_node_attributes(G, 'motifs')[j]
                        })
                    # nx.set_node_attributes(m, 'neighbors', {j: nx.get_node_attributes(m, 'neighbors')[j] + nx.get_node_attributes(G, 'neighbors')[j]})
            G = nx.compose(G, m)
        G = nx.convert_node_labels_to_integers(G)
        graphs.append(G)
    return graphs, motifs
Example #34
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--infile', type=str, default=None)
    parser.add_argument('-s', '--singletons', type=str, default=None)
    parser.add_argument('-t', '--temp_prefix', type=str, default=None)
    parser.add_argument('-f', '--infile2', type=str, default=None)

    args = parser.parse_args()
    min_counts_strict = 5
    usage_denom = 1024
    Gloc = nx.Graph()
    Ghom = nx.Graph()

    homvar = {}
    with gzip.open(args.infile2, 'rt') as fp:
        line = fp.readline().strip()
        while line:
            homvar[line] = 1
            line = fp.readline().strip()

    Gloc = nx.Graph()
    Gloconf = nx.Graph()

    with gzip.open(args.temp_prefix + '.het.txt.gz', 'rt') as fp:
        line = fp.readline().strip()
        ct = 0
        while line:
            if ct % 1000 == 0:
                sys.stderr.write(
                    str(ct) + '\t' + str(
                        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
                        usage_denom) + '\n')
            ll = re.split('[\t]', line)
            [loc1, loc2] = ll[0:2]
            tp = 'het-het'
            cts = list(map(int, ll[2:6]))
            mns = list(map(float, ll[6:10]))
            if loc2 < loc1:
                [loc1, loc2] = [loc2, loc1]
                cts = [cts[0], cts[2], cts[1], cts[3]]
                mns = [mns[0], mns[2], mns[1], mns[3]]
            [passf, orient, nn,
             dist] = pre_filter_strict_pass(cts, mns, 0.95, tp)
            if passf:
                Gloc.add_edge(loc1,
                              loc2,
                              conf=True,
                              cts=cts,
                              mns=mns,
                              wt=nn,
                              orient=orient,
                              dist=dist)
            else:
                Gloc.add_node(loc1)
                Gloc.add_node(loc2)
                [passf, orient, nn,
                 dist] = pre_filter_loose_pass(cts, mns, 2, 0.95, tp)
                if passf:
                    Gloconf.add_edge(loc1,
                                     loc2,
                                     conf=False,
                                     cts=cts,
                                     mns=mns,
                                     wt=nn,
                                     orient=orient,
                                     dist=dist)
            line = fp.readline().strip()
            ct += 1

    het_bridges = remove_bridges(Gloc, min_counts_strict, 'het-hom')
    loc2comp = {}
    comp2tree = {}

    #  with gzip.open(args.singletons, 'rt') as fp:
    #    line=fp.readline().strip()
    #    while line:
    #      if not Gloc.has_node(line):
    #        Gloc.add_node(line)
    #      line=fp.readline().strip()

    gg = list(
        Gloc.subgraph(cc)
        for cc in sorted(nx.connected_components(Gloc), key=len, reverse=True))
    for ii in range(len(gg)):
        print(
            str(ii) + '\t' + str(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
                usage_denom / usage_denom))
        if gg[ii].number_of_nodes() > 2:
            tr = nx.minimum_spanning_tree(gg[ii], weight='dist')
        else:
            tr = gg[ii]
        comp2tree[ii] = tr
        for node in tr.nodes():
            loc2comp[node] = ii

    with gzip.open(args.temp_prefix + '.het.l2c.txt.gz', 'wt') as fp:
        for loc in loc2comp.keys():
            print(loc + '\t' + str(loc2comp[loc]), file=fp)

    with gzip.open(args.temp_prefix + '.het.c2t.txt.gz', 'wt') as fp:
        for comp in comp2tree.keys():
            tr = comp2tree[comp]
            for edge in tr.edges(data=True):
                print(str(comp) + '\t' + edge[0] + '\t' + edge[1] + '\t' +
                      edge[2]['orient'] + '\t' + str(edge[2]['dist']) + '\t' +
                      str(edge[2]['wt']),
                      file=fp)

    loc2comphom = {}
    comp2treehom = {}

    with gzip.open(args.temp_prefix + '.hom.l2c.txt.gz', 'rt') as fp:
        line = fp.readline().strip()
        while line:
            [loc, comp] = re.split('[\t]', line)
            loc2comphom[loc] = int(comp)
            line = fp.readline().strip()

    oldcomp = -1
    with gzip.open(args.temp_prefix + '.hom.c2t.txt.gz', 'rt') as fp:
        line = fp.readline().strip()
        tr = nx.Graph()
        while line:
            [comp, loc1, loc2, orient, dist, wt] = re.split('[\t]', line)
            comp = int(comp)
            dist = int(dist)
            wt = int(wt)
            if comp == oldcomp or oldcomp < 0:
                tr.add_edge(loc1, loc2, wt=wt, dist=dist, orient=orient)
                if oldcomp < 0:
                    oldcomp = comp
            else:
                comp2treehom[oldcomp] = tr.copy()
                tr = nx.Graph()
                oldcomp = comp
                tr.add_edge(loc1, loc2, wt=wt, dist=dist, orient=orient)
            line = fp.readline().strip()

    with gzip.open(args.temp_prefix + 'hom.bed.gz', 'wt') as outb:
        for comp in comp2treehom.keys():
            id = 'hom_' + str(comp)
            mintree = comp2treehom[comp]
            str1 = comp2bed(mintree.nodes, id, '0,150,0')
            if str1 is not None:
                print(str1, file=outb)

    with gzip.open(args.temp_prefix + 'het.bed.gz', 'wt') as outb:
        for comp in comp2tree.keys():
            id = 'het_' + str(comp)
            print(id)
            mintree = comp2tree[comp]
            str1 = comp2bed(mintree.nodes, id, '150,0,0')
            if str1 is not None:
                print(str1, file=outb)

    Gmix = nx.Graph()
    superg = nx.Graph()

    with gzip.open(args.temp_prefix + '.mixed.txt.gz', 'rt') as fp:
        line = fp.readline().strip()
        ct = 0
        while line:
            if ct % 1000 == 0:
                sys.stderr.write(
                    str(ct) + '\t' + str(
                        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
                        usage_denom) + '\n')
            ll = re.split('[\t]', line)
            cts = list(map(int, ll[2:6]))
            mns = list(map(float, ll[6:10]))
            [loc1, loc2] = ll[0:2]
            tp = 'het-hom'
            [passf, orient, nn,
             dist] = pre_filter_strict_pass(cts, mns, 0.95, tp)
            if passf:
                if not loc2 in homvar:
                    [loc1, loc2] = [loc2, loc1]
                    cts = [cts[0], cts[2], cts[1], cts[3]]
                    mns = [mns[0], mns[2], mns[1], mns[3]]
                if loc1 in loc2comp and loc2 in loc2comphom:
                    Gmix.add_edge(loc1,
                                  loc2,
                                  conf=True,
                                  cts=cts,
                                  mns=mns,
                                  wt=nn,
                                  orient=orient,
                                  dist=dist)
                    hetcomp = loc2comp[loc1]
                    homcomp = loc2comphom[loc2]
                    node1 = 'het_' + str(hetcomp)
                    node2 = 'hom_' + str(homcomp)
                    if not superg.has_edge(node1, node2):
                        superg.add_edge(node1,
                                        node2,
                                        dist=[],
                                        wt=0,
                                        ct=0,
                                        sum_dist=0)
                    superg.edges[node1, node2]['wt'] += nn
                    superg.edges[node1, node2]['dist'].append(dist)
                    superg.edges[node1, node2]['ct'] += 1
                    superg.edges[node1, node2]['sum_dist'] += dist
            line = fp.readline().strip()
            ct += 1

    gg = list(
        superg.subgraph(cc) for cc in sorted(
            nx.connected_components(superg), key=len, reverse=True))
    comp2treemixed = {}
    loc2compmixed = {}

    for ii in range(len(gg)):
        tocomp = []
        for node in gg[ii].nodes():
            [tp, cid] = re.split('_', node)
            if tp == 'het':
                tr = comp2tree[int(cid)]
            else:
                tr = comp2treehom[int(cid)]
            # Tag every locus in the component tree with its layer of origin.
            for loc in tr.nodes():
                tr.nodes[loc]['tp'] = tp
            tocomp.append(tr)
        Gcomp = nx.compose_all(tocomp)
        Gsub = Gmix.subgraph(Gcomp.nodes())
        Gcomp1 = nx.compose(Gsub, Gcomp)
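        # Attributes from Gcomp (the second argument) take precedence on
        # overlapping nodes and edges, preserving the 'tp' labels set above.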
        mintree = nx.minimum_spanning_tree(Gcomp1, weight='dist')
        comp2treemixed[ii] = mintree
        for node in mintree.nodes():
            loc2compmixed[node] = ii


#  with open(args.temp_prefix+'.mixed.p', 'wb') as f:
#    ll=[loc2compmixed, comp2treemixed]
#    pickle.dump(ll, f)

    with gzip.open(args.temp_prefix + '.mixed.bed.gz', 'wt') as outb:
        for comp in comp2treemixed.keys():
            id = 'mixed_' + str(comp)
            print(id)
            mintree = comp2treemixed[comp]
            str1 = comp2bed(mintree.nodes, id, '0,0,150')
            if str1 is not None:
                print(str1, file=outb)

    code.interact(local=locals())
    for node in superg.nodes():
        deg = superg.degree(node)
        [tp, id] = re.split('_', node)
        if tp == 'hom':
            print(node + '\t' + str(deg))
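
The pattern at the core of this example: compose the per-component trees with
the confirming cross-edges, then reduce the result back to a tree with a
minimum spanning tree. A toy sketch of that reduction, using hypothetical
graphs rather than the script's data:

import networkx as nx

g1 = nx.Graph([('a', 'b', {'dist': 1})])
g2 = nx.Graph([('b', 'c', {'dist': 5}), ('a', 'c', {'dist': 2})])
merged = nx.compose(g1, g2)
mst = nx.minimum_spanning_tree(merged, weight='dist')
assert sorted(mst.edges) == [('a', 'b'), ('a', 'c')]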
Example#35
0
    def make_dag(self, expand=set()):
        """Generate a DAG for the model
        """
        G = nx.DiGraph()

        ## Inputs-to-Functions
        for f in self.functions:
            # Expand composed models
            if isinstance(f, FunctionModel) and (f.name in expand):
                G_ref = f.model.make_dag(expand=expand - {f.name})
                G_sub = nx.DiGraph()
                # Add nodes
                G_sub.add_node(f.name + ".var")
                G_sub.add_node(f.name + ".out")
                for g in f.model.functions:
                    G_sub.add_node(f.name + "." + g.name)
                # Add node metadata
                nx.set_node_attributes(G_sub, f.name, "parent")

                # Add edges
                for u, v, d in G_ref.edges(data=True):
                    # Add renamed edge
                    if u == "(var)":
                        G_sub.add_edge(f.name + ".var", f.name + "." + v, **d)
                    elif v == "(out)":
                        G_sub.add_edge(f.name + "." + u, f.name + ".out", **d)
                    else:
                        G_sub.add_edge(f.name + "." + u, f.name + "." + v, **d)

                # Compose the graphs
                G = nx.compose(G, G_sub)

            i_var = set(self.var).intersection(set(f.var))
            if len(i_var) > 0:
                s_var = "{}".format(i_var)
                if isinstance(f, FunctionModel) and (f.name in expand):
                    G.add_edge("(var)", f.name + ".var", label=s_var)
                else:
                    G.add_edge("(var)", f.name, label=s_var)

        ## Function-to-Function
        for i0 in range(len(self.functions)):
            for i1 in range(i0 + 1, len(self.functions)):
                f0 = self.functions[i0]
                f1 = self.functions[i1]
                i_var = set(f0.out).intersection(set(f1.var))

                ## If connected
                if len(i_var) > 0:
                    s_var = "{}".format(i_var)
                    ## Handle composed models
                    if isinstance(f0, FunctionModel) and (f0.name in expand):
                        name0 = f0.name + ".out"
                    else:
                        name0 = f0.name
                    if isinstance(f1, FunctionModel) and (f1.name in expand):
                        name1 = f1.name + ".out"
                    else:
                        name1 = f1.name

                    G.add_edge(name0, name1, label=s_var)

        ## Functions-to-Outputs
        for f in self.functions:
            i_out = set(self.out).intersection(set(f.out))

            if len(i_out) > 0:
                s_out = "{}".format(i_out)
                ## Target composed model's out
                if isinstance(f, FunctionModel) and (f.name in expand):
                    G.add_edge(f.name + ".out", "(out)", label=s_out)
                ## An ordinary function
                else:
                    G.add_edge(f.name, "(out)", label=s_out)

            # Add node metadata
            nx.set_node_attributes(G, {f.name: {"parent": self.name}})

        # Final metadata
        nx.set_node_attributes(G, {"(var)": {"parent": self.name}})
        nx.set_node_attributes(G, {"(out)": {"parent": self.name}})

        return G
    return data


def program_to_data(program: str, parser: Parser = None) -> Data:
    return get_data_from_graph(parse_program(program, parser))


if __name__ == '__main__':
    program_str = """
        public int add(int a, int b) {
            int c = 0;
            c = a + b;
            return c;
        }
    """

    program_str_2 = """
        public int add_numbers(int c, int d) {
            int e = 0;
            return c + d;
        }
    """

    parser = get_parser(JAVA_SO_PATH)
    g1 = parse_program(program_str, parser)
    g2 = parse_program(program_str_2, parser)
    g3 = nx.compose(g1, g2)
    plot_graph(g3)
    plt.show()
Example#37
0
def objDetect(data, file):
    #global file
    global index_of_img
    e_max = 4
    V1 = ob.EM_Object_s()
    V2 = ob.EM_Object_s()
    pub_cnn.publish(0.0)
    br = CvBridge()
    V_temp = ob.EM_Object_s()

    try:
        img = br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    current_objects = net.predict(img, display_img=img)
    beta = e_max / 2
    all_images.append(img)
    for object in current_objects:
        position = object["bb_o"]
        x = (position[1] + position[3]) / 2
        y = (position[0] + position[2]) / 2
        label = object["class"]
        print "label", label
        vObjects.addObject(x, y, label, index_of_img)
        vObjects.createEdge(x, y, label, index_of_img)
        V1.addObject(x, y, label, index_of_img)
        V.addObject(x, y, label, index_of_img)
    index_of_img += 1

    for ob1 in V1.objects:
        for ob2 in V.objects:
            dij = math.sqrt((ob1.cv[0] - ob2.cv[0])**2 +
                            (ob1.cv[1] - ob2.cv[1])**2)
            if dij < beta:
                x = ob2.cv[0]
                y = ob2.cv[1]
                lv = ob2.lv
                V_temp.addObject(x, y, lv, index_of_img)
    V_tot = nx.Graph()
    V_tot = nx.compose(V1.objects, V_temp.objects)
    V1.objects = V_tot

    for i in V.objects:
        if i not in V1.objects:
            x = i.cv[0]
            y = i.cv[1]
            lv = i.lv
            V2.addObject(x, y, lv, index_of_img)

    #compute k_nearest neighbors between the new place and the object graph
    '''for ob1 in V1.objects:
        for ob2 in V2.objects:
            dij=math.sqrt((ob1.cv[0]-ob2.cv[0])**2+(ob1.cv[1]-ob2.cv[1])**2)
            if dij<beta:
                x=ob2.cv[0]
                y=ob2.cv[1]
                lv=ob2.lv
                V_temp.addObject(x,y,lv,index_of_img)
    V_tot=nx.Graph()
    V_tot=nx.compose(V1.objects,V_temp.objects)
    V1.objects=V_tot
    '''
    V1.emax = 4
    match = Matching(V1, V2, all_images)
    Hr = match.create_Hr()
    Hl = match.create_Hl()
    number_of_1 = 0
    for i in range(len(Hr)):
        for j in range(len(Hr[0])):
            for k in range(len(Hr[0][0])):
                for l in range(len(Hr[0][0][0])):
                    if (Hl[i][j][k][l] == 1):
                        print(" I am a 1")
                        number_of_1 += 1
    #print "index of 1",Hl.index(0)
    print("number_of_1", number_of_1)
    # Build H with nested comprehensions; replicating lists with `*` would
    # alias the inner rows, so every write would hit all of the copies.
    H = [[[[Hr[i][j][k][l] * Hl[i][j][k][l]
            for l in range(len(Hr[0][0][0]))]
           for k in range(len(Hr[0][0]))]
          for j in range(len(Hr[0]))]
         for i in range(len(Hr))]

    match.all_views = all_images
    scoreX = match.compute_X(H)

    print " i am near score variable"
    file.write("eeeeeeeeeeeeeeeeee")
    scoreXVector.append(scoreX)
    print "score X", scoreX

    pub_cnn.publish(1.0)
    pub_cnn.publish(0.0)
    cv2.imshow('scene at time t', img)

    cv2.waitKey(1)
    cv2.destroyAllWindows()
Example#38
0
File: grammar.py Project: csantran/pg2l
    def __add__(self, grammar):
        if not isinstance(grammar, MetaGrammar):
            raise Exception()

        return MetaGrammar(nx.compose(self, grammar))
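
Assuming MetaGrammar itself subclasses a NetworkX graph (nx.compose is called
on self directly), the operator lets two grammars be merged with `+`; a usage
sketch with hypothetical instances:

combined = grammar_a + grammar_b  # union of both grammar graphs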
Example#39
0
    def getGraph_Recursive(self, traversedAgents: List[AgentRepresentation]):

        print(f"gg_r called on {self.ssid}")

        if len(self.found_agents) == 0:
            self.found_agents = self.findAgents()

        agents = [
            agent for agent in self._semiShuffle(self.found_agents)
            if agent not in traversedAgents
        ]

        # agents = [agent for agent in agents if len(self.graph.neighbors(agent)) < MAX_CONNECTED_AGENTS]

        print(f"untraversed agents: {[i.ssid for i in agents]}")

        if AgentRepresentation.fromAgent(self) not in traversedAgents:
            traversedAgents.append(AgentRepresentation.fromAgent(self))
        if len(agents) > 0:
            for agent in agents[0:min(len(agents), MAX_CONNECTED_AGENTS)]:
                otherrep = agent
                selfrep = AgentRepresentation.fromAgent(self)
                if MeshNode.call(otherrep.port, Request(
                        'num_connections')).response >= MAX_CONNECTED_AGENTS:
                    continue
                (otherGraph, additionallyTraversed) = MeshNode.call(
                    otherrep.port,
                    Request('get_graph_recursive',
                            args=[traversedAgents],
                            longRunning=True)).response
                traversedAgents.extend(additionallyTraversed)
                traversedAgents = list(set(traversedAgents))

                newGraph = nx.compose(otherGraph, self.graph)
                self.graph = newGraph.copy()
                # print(f"New graph: {otherGraph}")
                # self.graph.add_nodes_from(otherGraph)
                # self.graph.add_edges_from(otherGraph)
                self.graph.add_edge(selfrep, otherrep)
                self.directly_connected.append(otherrep)

        print(
            f"Number of connections: {len(list(self.graph.neighbors(AgentRepresentation.fromAgent(self))))}"
        )
        if len(list(self.graph.neighbors(
                AgentRepresentation.fromAgent(self)))) < MAX_CONNECTED_AGENTS:
            # If there are no unconnected agents, let's add the closest ones that
            # A. we don't have any connections with
            # B. aren't at max connections
            nodes = list(self.graph.nodes())
            nodes.remove(AgentRepresentation.fromAgent(self))
            agents = []

            for agent in nodes:
                if len(list(
                        self.graph.neighbors(agent))) < MAX_CONNECTED_AGENTS:
                    if not self.graph.has_edge(
                            AgentRepresentation.fromAgent(self), agent):
                        agents.append(agent)
            agents2 = [
                a for a in self._semiShuffle(self.found_agents) if a in agents
            ]
            agents = agents2
            for agent in agents[0:min(
                    MAX_CONNECTED_AGENTS - len(
                        list(
                            self.graph.
                            neighbors(AgentRepresentation.fromAgent(self)))
                    ), len(agents))]:
                print(
                    f"Forming additional connection between {self.ssid} and {agent.ssid}"
                )
                self.graph.add_edge(agent, AgentRepresentation.fromAgent(self))
                self.directly_connected.append(agent)

                # print(f"Returning traversed: {traversedAgents}")
        # A bare `assert` on a non-empty list is always true; use all() to
        # actually enforce the degree bound.
        assert all(
            len(list(self.graph.neighbors(i))) <= MAX_CONNECTED_AGENTS
            for i in self.graph.nodes())
        return (self.graph, traversedAgents)
Example#40
0
def merge_g_op(a, b, datatype):
    return nx.compose(a, b)
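
A minimal, self-contained sketch of what nx.compose does for merge_g_op: it
returns the union of both graphs' nodes and edges, and where they overlap the
attributes of the second argument win.

import networkx as nx

a = nx.Graph()
a.add_node(1, color='red')
a.add_edge(1, 2)

b = nx.Graph()
b.add_node(1, color='blue')
b.add_edge(2, 3)

c = nx.compose(a, b)
assert sorted(c.nodes) == [1, 2, 3]
assert c.nodes[1]['color'] == 'blue'  # b's attributes take precedence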
Example#41
0
        networkGraphs[network] = networkGraph

        htmlDF.export(
            "/mnt/d/yanc_network/" + network.replace(" ", "_") + ".html",
            ExportTYPE.HTML)
        htmlDF.export(
            "/mnt/d/yanc_network/" + network.replace(" ", "_") + ".tsv",
            ExportTYPE.TSV)

    figidx = 0
    for stages in makeStory:

        mergedGraph = networkGraphs[stages[0]]

        for i in range(1, len(stages)):
            mergedGraph = nx.compose(mergedGraph, networkGraphs[stages[i]])

        hasLargeStage = any(['large' in stage for stage in stages])

        pos = nx.spring_layout(mergedGraph)

        for stage in stages:

            networkGraph = networkGraphs[stage]

            edges = networkGraph.edges()
            colors = [networkGraph[u][v]['color'] for u, v in edges]

            d = nx.degree(networkGraph)
            nodes = networkGraph.nodes()
            nodeColors = []
Example#42
0
 def plan(self,
          component_name,
          start_conf,
          goal_conf,
          obstacle_list=[],
          otherrobot_list=[],
          ext_dist=2,
          rand_rate=70,
          max_iter=1000,
          max_time=15.0,
          smoothing_iterations=50,
          animation=False):
     self.roadmap.clear()
     self.roadmap_start.clear()
     self.roadmap_goal.clear()
     self.start_conf = start_conf
     self.goal_conf = goal_conf
     # check start and goal
     if self._is_collided(component_name, start_conf, obstacle_list,
                          otherrobot_list):
         print("The start robot_s configuration is in collision!")
         return None
     if self._is_collided(component_name, goal_conf, obstacle_list,
                          otherrobot_list):
         print("The goal robot_s configuration is in collision!")
         return None
     if self._goal_test(conf=start_conf,
                        goal_conf=goal_conf,
                        threshold=ext_dist):
         return [start_conf, goal_conf]
     self.roadmap_start.add_node('start', conf=start_conf)
     self.roadmap_goal.add_node('goal', conf=goal_conf)
     last_nid = 'goal'
     tic = time.time()
     for _ in range(max_iter):
         toc = time.time()
         if max_time > 0.0:
             if toc - tic > max_time:
                 print("Too much motion time! Failed to find a path.")
                 return None
         # Random Sampling
         while True:
             if last_nid != -1:
                 goal_nid = last_nid
             goal_conf = self.roadmap_goal.nodes[goal_nid]['conf']
             rand_conf = self._sample_conf(component_name=component_name,
                                           rand_rate=rand_rate,
                                           default_conf=goal_conf)
             # goal_nid = 'goal'
             last_nid = self._extend_roadmap(
                 component_name=component_name,
                 roadmap=self.roadmap_start,
                 conf=rand_conf,
                 ext_dist=ext_dist,
                 goal_conf=goal_conf,
                 obstacle_list=obstacle_list,
                 otherrobot_list=otherrobot_list,
                 animation=animation)
             if last_nid != -1:
                 break
         if last_nid == 'connection':
             self.roadmap = nx.compose(self.roadmap_start,
                                       self.roadmap_goal)
             self.roadmap.add_edge(last_nid, goal_nid)
             break
         else:
             while True:
                 if last_nid != -1:
                     goal_nid = last_nid
                 goal_conf = self.roadmap_start.nodes[goal_nid]['conf']
                 rand_conf = self._sample_conf(
                     component_name=component_name,
                     rand_rate=rand_rate,
                     default_conf=goal_conf)
                 last_nid = self._extend_roadmap(
                     component_name=component_name,
                     roadmap=self.roadmap_goal,
                     conf=rand_conf,
                     ext_dist=ext_dist,
                     goal_conf=goal_conf,
                     obstacle_list=obstacle_list,
                     otherrobot_list=otherrobot_list,
                     animation=animation)
                 if last_nid != -1:
                     break
             if last_nid == 'connection':
                 self.roadmap = nx.compose(self.roadmap_start,
                                           self.roadmap_goal)
                 self.roadmap.add_edge(last_nid, goal_nid)
                 break
     else:
         print("Reach to maximum iteration! Failed to find a path.")
         return None
     path = self._path_from_roadmap()
     smoothed_path = self._smooth_path(component_name=component_name,
                                       path=path,
                                       obstacle_list=obstacle_list,
                                       otherrobot_list=otherrobot_list,
                                       granularity=ext_dist,
                                       iterations=smoothing_iterations,
                                       animation=animation)
     return smoothed_path
Example#43
0
 def merge(self, that):
     if self._tmp_merge_dag is not None:
         self._dag = self._tmp_merge_dag
         self._tmp_merge_dag = None
     else:
         self._dag = nx.compose(self._dag, that._dag)
Example#44
0
def full_graph(s, m):
    G = nx.compose(s, m)
    return G
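
For context: nx.compose is the right merge when the node sets may overlap, as
in full_graph above; nx.union raises an error on shared node names, and
nx.disjoint_union relabels them instead. A small illustration:

import networkx as nx

s = nx.path_graph(3)           # nodes 0, 1, 2
m = nx.path_graph([2, 3, 4])   # shares node 2 with s
g = nx.compose(s, m)
assert sorted(g.nodes) == [0, 1, 2, 3, 4]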
Example#45
0
def get_visualization_image(action_list, env_output_list, environment):
    """Get visualization images.

  Args:
    action_list: List of actions.
    env_output_list: List of observations in the trajectories.
    environment: Testing environment.

  Returns:
    A list which only have one numpy array. The numpy array is the image matrix
    with shape [width, height, 3]
  """
    def is_valid_node(node):
        if node == constants.STOP_NODE_NAME or node == constants.INVALID_NODE_NAME:
            return False
        return True

    current_scan = _get_current_scan(env_output_list)
    ground_truth_path = _get_ground_truth_path(action_list, env_output_list)
    predicted_path = _get_predicted_path(action_list, env_output_list)

    # Map from pano_id to pano_name.
    ground_truth_path = [
        environment.pano_id_to_name(pano_id, current_scan)
        for pano_id in ground_truth_path
    ]
    predicted_path = [
        environment.pano_id_to_name(pano_id, current_scan)
        for pano_id in predicted_path
    ]

    fig = plt.figure(figsize=(2, 2))
    ax = fig.add_subplot(1, 1, 1)
    # Get a graph_utils.Graph instance.
    base_graph = environment.get_scan_graph(current_scan)
    # Convert to networkx Graph.
    base_graph = base_graph.to_nx()
    node_pos_dict = networkx.get_node_attributes(base_graph, 'coords')

    all_nodes = ground_truth_path + predicted_path
    all_nodes = list(filter(is_valid_node, all_nodes))

    # Zoom-in to the predicted subgraph.
    cut = 1.3
    xs = [node_pos_dict[node][0] for node in all_nodes]
    ys = [node_pos_dict[node][1] for node in all_nodes]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    x_range, y_range = cut * (max_x - min_x), cut * (max_y - min_y)
    max_range = max(x_range, y_range)
    center_x, center_y = (min_x + max_x) / 2, (min_y + max_y) / 2
    ax.set_xlim(center_x - max_range / 2, center_x + max_range / 2)
    ax.set_ylim(center_y - max_range / 2, center_y + max_range / 2)
    ax.set_ymargin(0.01)

    networkx.draw(base_graph,
                  node_pos_dict,
                  edge_color='lightgrey',
                  node_color='lightgrey',
                  node_size=20,
                  width=0.3)

    ground_truth_graph, edges = _draw_path(ground_truth_path,
                                           node_pos_dict,
                                           color='cornflowerblue')
    predicted_graph, pr_edges = _draw_path(predicted_path,
                                           node_pos_dict,
                                           color='orange')

    for key, value in pr_edges.items():
        edges[key].extend(value)

    combined_graph = networkx.compose(ground_truth_graph, predicted_graph)
    overlap_offset = 0.015

    # Draw the edges with a small offset to avoid overlap.
    for (start, end), edge_list in edges.items():
        edge_dir = np.array(node_pos_dict[end]) - np.array(
            node_pos_dict[start])
        offset_dir = np.array([edge_dir[1], -edge_dir[0]])
        offset_dir /= np.linalg.norm(offset_dir) / max_range
        edge_list = edge_list[:3]
        num_edges = len(edge_list)
        for edge_idx, (s, e, color) in enumerate(edge_list):
            offset = (edge_idx - (num_edges - 1.) / 2) * overlap_offset
            shifted_pos = {
                key: (val[0] + offset * offset_dir[0],
                      val[1] + offset * offset_dir[1])
                for key, val in node_pos_dict.items()
            }
            networkx.draw_networkx_edges(combined_graph,
                                         shifted_pos,
                                         edgelist=[(s, e)],
                                         edge_color=color,
                                         **DEFAULT_PARAM)

    fig.canvas.draw()
    # np.fromstring is deprecated for binary data; np.frombuffer is equivalent.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    plt.close(fig)
    return [data]
Example#46
0
    def _extract_features(self, graphs, ai2d_ann, image, layers):
        """
        Extracts features from the original AI2D annotation and adds them to the
        AI2D-RST graphs.

        Parameters:
            graphs: A dictionary of NetworkX graphs for AI2D-RST annotation.
            ai2d_ann: A dictionary containing the original AI2D annotation.
            image: An image of the diagram from the original AI2D dataset.
            layers: A string defining annotation layers to include in the
                    updated graphs.

        Returns:
            A dictionary of NetworkX graphs with updated features.
        """
        # To begin with, build the grouping graph, which provides the layout
        # information for all diagram elements; these can then be picked out
        # in other graphs if necessary.
        graph = graphs['grouping']

        # Check that a graph exists
        try:

            # Fetch nodes from the graph
            nodes = graph.nodes(data=True)

        except AttributeError:

            return None

        # Begin extracting the features by getting the diagram image shape
        h, w = image.shape[:2]

        # Get the number of pixels in the image
        n_pix = h * w

        # Set up placeholder dictionaries to hold updated node and edge
        # features
        node_features = {}
        edge_features = {}

        # Loop over the nodes and their features
        for node, features in nodes:

            # Fetch the node type from its features under the key 'kind'
            node_type = features['kind']

            # Parse layout annotation
            layout_feats = self._parse_ai2d_layout(
                ai2d_ann,  # annotation
                h,  # image height
                w,  # image width
                n_pix,  # n of pixels
                node_type,  # elem type
                node  # node id
            )

            # Add layout features to the dictionary of updated node features
            node_features[node] = {
                'features': layout_feats,
                'kind': self.node_dict['grouping'][node_type]
            }

            # Update node attributes in the grouping graph using layout
            # features
            nx.set_node_attributes(graph, node_features)

        # Calculate features for grouping nodes based on their children. This
        # requires a directed tree graph.
        group_tree = nx.dfs_tree(graph, source="I0")

        # Get a list of grouping nodes and image constants in the graph
        groups = [
            n for n, attr in graph.nodes(data=True) if attr['kind'] in [
                self.node_dict['grouping']['imageConsts'],
                self.node_dict['grouping']['group']
            ]
        ]

        # Iterate over the nodes in the graph
        for n, attr in graph.nodes(data=True):

            # Check if the node type is a group
            if n in groups:

                # Get predecessors of the grouping node
                n_preds = nx.dfs_predecessors(group_tree, n)

                # Remove groups from the list of predecessors;
                # each group will be processed independently
                n_preds = [n for n in n_preds.keys() if n not in groups]

                # Create a subgraph consisting of preceding nodes
                n_subgraph = graph.subgraph(n_preds)

                # Get layout features for each node
                n_feats = [
                    ad['features'] for n, ad in n_subgraph.nodes(data=True)
                ]

                # Cast stacked features into a 2D numpy array
                stacked_feats = np.array(n_feats)

                # Get average centre point for group by slicing the array
                x_avg = np.average(stacked_feats[:, 0])
                y_avg = np.average(stacked_feats[:, 1])

                # Add up their area
                a_sum = np.sum(stacked_feats[:, 2])

                # Average the solidity
                s_avg = np.average(stacked_feats[:, 3])

                # Concatenate the features
                layout_feats = np.concatenate(
                    [[x_avg], [y_avg], [a_sum], [s_avg]], axis=0)

                # Update group feature dictionary
                upd_group_feats = {
                    n: {
                        'features': layout_feats,
                        'kind': attr['kind']
                    }
                }

                # Update group features
                nx.set_node_attributes(graph, upd_group_feats)

        # Add edge types to the grouping layer, as these are not defined in the
        # JSON annotation. To do so, get the edges from the grouping graph.
        edges = graph.edges(data=True)

        # Loop over the edges in the graph
        for src, dst, features in edges:

            # Add edge type under key 'kind' to the edge_features dictionary
            edge_features[src, dst] = {'kind': 'grouping'}

        # Update edge features in the grouping graph
        nx.set_edge_attributes(graph, edge_features)

        # Encode edge features
        self._encode_edges(graph, self.edge_dict['grouping'])

        # Update the grouping graph in the graphs dictionary
        graphs['grouping'] = graph

        # Now that the grouping layer has been created, check which other
        # annotation layers must be included in the graph-based representation.

        # The combination of grouping and connectivity layers is a relatively
        # simple case.
        if layers == "grouping+connectivity":

            # If a connectivity graph exists, merge it with the grouping graph
            if graphs['connectivity'] is not None:

                # Use nx.compose() to combine the grouping and connectivity
                # graphs
                graph = nx.compose(graphs['connectivity'], graphs['grouping'])
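                # Node and edge attributes from the grouping graph (the
                # second argument) take precedence on overlapping elements.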

            # Encode edge type information using numerical labels
            self._encode_edges(graph, self.edge_dict['connectivity'])

            # Update the grouping graph
            graphs['grouping'] = graph

        # The connectivity layer alone is a bit more complex, as the children of
        # grouping nodes need to be copied over to the connectivity graph.
        if layers == 'connectivity' and graphs['connectivity'] is not None:

            # Get the grouping and connectivity graphs
            conn_graph = graphs['connectivity']
            group_graph = graphs['grouping']

            # Get a list of nodes in the connectivity graph
            conn_nodes = list(conn_graph.nodes(data=True))

            # Get a list of grouping nodes in the connectivity graph
            grouping_nodes = [
                n for n, attr_dict in conn_nodes
                if attr_dict['kind'] == 'group'
            ]

            # If grouping nodes are found, get their children and add them to
            # the graph
            if len(grouping_nodes) > 0:

                # Create a directed tree graph using depth-first search,
                # starting from the image constant I0.
                group_tree = nx.dfs_tree(group_graph, source="I0")

                # Loop over each grouping node
                for gn in grouping_nodes:

                    # Resolve grouping nodes by adding their children to the
                    # connectivity graph
                    self._resolve_grouping_node(gn, group_tree, group_graph,
                                                conn_graph)

            # Copy node features from the grouping graph for all nodes in the
            # connectivity graph.
            n_subgraph = group_graph.subgraph(conn_graph.nodes)

            # Add these nodes to the connectivity graph
            conn_graph.add_nodes_from(n_subgraph.nodes(data=True))

            # Encode edge type information using numerical labels
            self._encode_edges(conn_graph, self.edge_dict['connectivity'])

            # Update the connectivity graph in the graphs dictionary
            graphs['connectivity'] = conn_graph

        # Start building the discourse graph by getting node features from the
        # grouping graph.
        if layers == 'discourse':

            # Get grouping and discourse graphs
            group_graph = graphs['grouping']
            rst_graph = graphs['discourse']

            # Reverse node type dictionary for the grouping layer
            rev_group_dict = {
                int(v.item()): k
                for k, v in self.node_dict['grouping'].items()
            }

            # Re-encode node types to ensure that node types do not clash with
            # those defined for discourse graph
            upd_node_types = {
                k: rev_group_dict[int(v['kind'].item())]
                for k, v in group_graph.nodes(data=True)
            }

            # Update node attributes for the grouping graph
            nx.set_node_attributes(group_graph, upd_node_types, 'kind')

            # Get the nodes participating in the discourse graph from the
            # grouping graph using the .subgraph() method.
            subgraph = group_graph.subgraph(rst_graph.nodes)

            # Add these nodes back to the discourse graph with their features
            # and numerical labels. These will overwrite the original nodes.
            rst_graph.add_nodes_from(subgraph.nodes(data=True))

            # Check if discourse graph contains groups or split nodes. Split
            # nodes are used to preserve the tree structure in case a diagram
            # element participates in multiple RST relations.
            for n, attr_dict in rst_graph.copy().nodes(data=True):

                # Check if the node is a group
                if 'group' in attr_dict['kind']:

                    # Create a directed tree graph using depth-first search,
                    # starting from the image constant I0.
                    group_tree = nx.dfs_tree(group_graph, source="I0")

                    # Resolve grouping nodes by adding their children to the
                    # discourse graph.
                    self._resolve_grouping_node(n, group_tree, group_graph,
                                                rst_graph)

                # Check node for the copy_of attribute, which contains a
                # reference to the node which has been split.
                if 'copy_of' in attr_dict.keys():

                    # Get the identifier of the node in AI2D layout annotation
                    n_orig_id = attr_dict['copy_of']
                    n_orig_kind = attr_dict['kind']

                    # Fetch node data from the AI2D layout annotation
                    layout_feats = self._parse_ai2d_layout(
                        ai2d_ann, h, w, n_pix, n_orig_kind, n_orig_id)

                    # Add updated features to a dictionary
                    upd_node_feats = {
                        n: {
                            'features': layout_feats,
                            'kind': n_orig_kind
                        }
                    }

                    # Update node features in the graph
                    nx.set_node_attributes(rst_graph, upd_node_feats)

                # Check if the node is a relation
                if 'relation' in attr_dict['kind']:

                    # Get integer label for RST relation
                    rst_int_label = self.node_dict['relations'][
                        attr_dict['rel_name']]

                    # Get node labels and encode using label binarizer
                    rst_label = self._rst_binarizer.transform(rst_int_label)

                    # Check if label smoothing is requested:
                    if self._smooth_labels:

                        # Cast into float for label smoothing
                        rst_label = np.asarray(rst_label, dtype=np.float64)

                        # Smooth the labels by a factor of 0.1
                        rst_label *= (1 - 0.1)
                        rst_label += (0.1 / rst_label.shape[1])

                    # Store encoded information into the updated features dict
                    upd_node_feats = {n: {'features': rst_label.flatten()}}

                    # Set the updated features to nodes in the discourse graph
                    nx.set_node_attributes(rst_graph, upd_node_feats)

            # Check if a NetworkX graph should be returned
            if self._return_nx:

                return rst_graph

            # Convert node identifiers to integers. This needs to be performed
            # before creating a heterograph.
            rst_graph = nx.convert_node_labels_to_integers(rst_graph,
                                                           first_label=0)

            # Get nodes and convert to NumPy array; get unique nodes; get node
            # type index vector
            nodes = np.asarray([
                attr['kind'] for n, attr in rst_graph.nodes(data=True)
            ]).flatten()

            ntypes = np.unique(nodes)

            node_ixs = np.array(
                [np.where(ntypes == n) for n in np.nditer(nodes)],
                dtype=np.int64).flatten()

            # Do the same for edges
            edges = np.asarray([
                attr['kind'] for s, t, attr in rst_graph.edges(data=True)
            ]).flatten()

            etypes = np.unique(edges)

            edge_ixs = np.array(
                [np.where(etypes == e) for e in np.nditer(edges)],
                dtype=np.int64).flatten()

            # Create DGL graph object from the discourse graph
            g = dgl.from_networkx(rst_graph)

            # Assign node and edge types
            g.ndata[dgl.NTYPE] = torch.LongTensor(node_ixs)
            g.edata[dgl.ETYPE] = torch.LongTensor(edge_ixs)

            # Create a DGL heterograph from the DGL graph object
            hg = dgl.to_heterogeneous(g, ntypes, etypes)

            # Loop over node types in the heterograph
            for ntype in hg.ntypes:

                # Get unique node identifiers for this node type; cast to list
                rst_node_ids = hg.nodes[ntype].data[dgl.NID].tolist()

                # Loop over RST node identifiers
                features = np.vstack([
                    rst_graph.nodes[node_id]['features']
                    for node_id in rst_node_ids
                ])

                # Add features to DGL heterograph
                hg.nodes[ntype].data['features'] = torch.from_numpy(features)

            # Update the RST graph
            graphs['discourse'] = hg

        # Return all graphs
        return graphs
Example#47
0
cont = 0
if (args.folder):
    for dirpath, dirs, files in os.walk(args.folder):
        print('Processing folder: ' + dirpath)
        bar = Bar('Processing',
                  max=len(files) if not args.limit else int(args.limit))
        for f in files:
            if (args.limit):
                cont += 1
                if (int(args.limit) < cont):
                    break
            graphml = nx.read_graphml(os.path.join(dirpath, f))
            if not fullGraphml:
                fullGraphml = graphml
            else:
                # nx.compose returns a new graph; the result must be kept.
                fullGraphml = nx.compose(fullGraphml, graphml)
            bar.next()
        bar.finish()
    if args.output:
        print('Printing file: ' + args.output)
        nx.write_gexf(fullGraphml, args.output)
elif (args.input):
    fullGraphml = nx.read_gexf(args.input)
else:
    print('We need some substance here pal, specify input or folder')
    exit()

options = {
    'node_color': 'blue',
    'node_size': 100,
    'width': 1.5,
Example#48
0
#Adjacency matrix can also be loaded back to a graph
#G = nx.Graph(mat)
#print(nx.info(G))

#Graph edges in list form
#Gedges = [i for i in G.edges()]

G = nx.Graph()
gi = nx.Graph()

fileNums = [0]
for i in fileNums:
    fileName = "../Datasets/facebook/edges/" + str(i) + ".edges"
    gi = nx.read_edgelist(fileName)
    G = nx.compose(G, gi)
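# Equivalent, more compact form using nx.compose_all over the same files:
# G = nx.compose_all(nx.read_edgelist("../Datasets/facebook/edges/%d.edges" % i)
#                    for i in fileNums)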

print(nx.info(G))

#Layout
pos = nx.fruchterman_reingold_layout(G, dim=3)
lay = list()
for i in pos.values():
    lay.append(list(i))

N = len(G.nodes())
#labels=[i for i in pos.keys()]

ulti = {}
for i in pos.keys():
    ulti[i] = list(pos[i])
Example#49
0
def create_graph_by_threshold_knn(adj_mat, percentile, k=1, X=None):
    '''combine the graph from `create_graph_by_threshold` with a kNN graph.
    '''
    G_thres = create_graph_by_threshold(adj_mat, percentile)
    G_knn = create_knn_graph(X, k=k)
    return nx.compose(G_thres, G_knn)
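
The create_knn_graph helper is not shown here; a hedged sketch of what it
might look like, assuming scikit-learn is available and networkx >= 2.7:

import networkx as nx
from sklearn.neighbors import kneighbors_graph

def create_knn_graph(X, k=1):
    # Connectivity-mode kNN adjacency, symmetrized so the graph is undirected.
    adj = kneighbors_graph(X, n_neighbors=k, mode='connectivity')
    adj = adj.maximum(adj.T)
    return nx.from_scipy_sparse_array(adj)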
Example#50
0
def greedy_build(nodes,
                 priors=None,
                 cutoff=200,
                 considered=set(),
                 uniq='',
                 targets=[]):
    """
	Greedy algorithm which finds a probable mutation subgraph for given nodes.
	This algorithm chooses splits within the tree based on which mutation occurs most frequently,
	weighted by the prior probabilities of each mutation state for each character.
	Strings with NA ('-') as a state in the split character are segregated with the
	set of nodes which they most closely match to w.r.t. all other characters.

	:param nodes:
		A list of target nodes, where each node is in the form 'Ch1|Ch2|....|Chn'
	:param priors:
		A nested dictionary containing prior probabilities for [character][state] mappings
		where characters are in the form of integers, and states are in the form of strings,
		and values are the probability of mutation from the '0' state.
	:param cutoff:
		A cutoff that tells the greedy algorithm to stop, and return a partial sub-tree
		Set to -1 to run through to the individual samples (ie return the full tree)

	:param considered:
		Internal parameter which keeps track of which mutations have been considered in a set
		DO NOT MODIFY
	:param uniq:
		Internal parameter which keeps track of the path of mutations (1 = mutation taken, 0 = mutation not taken)
		DO NOT MODIFY
	:return:
		Returns a graph which contains splits as nodes in the form "character state (uniq_identifier)", and leaves
		as either samples, or the roots of the subsets of samples that need to be considered by another algorithm.
		Edges are labeled with the corresponding mutation taken
		AND
		a list in the form [[sub_root, sub_samples],....] which is a list of subproblems still needed to be solved
	"""

    # Tracks frequency of states for each character in nodes
    character_mutation_mapping = defaultdict(int)

    # G models the network that is returned recursively
    G = nx.DiGraph()

    root = root_finder(nodes)

    # Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes
    if len(nodes) <= cutoff or len(nodes) == 1:
        root = root_finder(nodes)
        G.add_node(root)
        return G, [[root, nodes]]

    # Accounting for frequency of mutated states per character, in order to choose the best split
    for node in nodes:
        node_list = node.split("_")[0].split('|')
        for i in range(0, len(node_list)):
            char = node_list[i]
            if char != '0' and char != '-':
                character_mutation_mapping[(str(i), char)] += 1
#if char != '0':
#    if char == "-":
#        character_mutation_mapping[(str(i), char)] -= 1
#    else:
#        character_mutation_mapping[(str(i), char)] += 1

    # Choosing the best mutation to split on (ie character and state)
    character, state = 0, 0
    max_cost = 0

    min_prior = 1
    if priors:
        for i in priors.keys():
            for j in priors[i].keys():
                min_prior = min(min_prior, priors[i][j])

    for i, j in character_mutation_mapping:
        if not (i, j) in considered:
            if not priors:
                if max_cost < character_mutation_mapping[(i, j)]:
                    max_cost = character_mutation_mapping[(i, j)]
                    character, state = i, j
            else:
                if j not in priors[int(i)]:
                    priors[int(i)][j] = min_prior
                if max_cost < -np.log(priors[int(
                        i)][j]) * character_mutation_mapping[(i, j)]:
                    max_cost = -np.log(
                        priors[int(i)][j]) * character_mutation_mapping[(i, j)]
                    character, state = i, j
    character = int(character)

    # If there is no good split left, stop the process and return a graph with the remainder of nodes
    if character == 0 and state == 0:
        if len(nodes) == 1:
            G.add_node(nodes[0])
        else:
            for i in range(0, len(nodes)):
                if nodes[i] != root:
                    G.add_edge(root, nodes[i])
        return G, []

    # Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character
    # Right split is where nodes with the mutation go, everyone else goes to left split or NA chars
    left_split, right_split, NA_chars = [], [], []
    right_split_temp = []
    left_split_temp = []
    for node in nodes:
        node_list = node.split('|')
        if node_list[character] == state:
            right_split.append(node)
        elif node_list[character] == '-':
            NA_chars.append(node)
        else:
            left_split.append(node)

    # Separates all nodes with NA in the character chosen to be split upon
    # Puts in right split or left split based on which list shares more mutated characters with this string
    for node in NA_chars:
        right_split_score = 0
        left_split_score = 0
        node_list = node.split('|')
        num_not_missing = len([n for n in node_list if n != "-"])
        for i in range(0, len(node_list)):
            if node_list[i] != '0' and node_list[i] != '-':
                for node_2 in left_split:
                    node2_list = node_2.split('|')
                    if node_list[i] == node2_list[i]:
                        left_split_score += 1
                for node_2 in right_split:
                    node2_list = node_2.split('|')
                    if node_list[i] == node2_list[i]:
                        right_split_score += 1

        avg_left_split_score = left_split_score / float(
            len(left_split) * num_not_missing + 1)
        avg_right_split_score = right_split_score / float(
            len(right_split) * num_not_missing + 1)

        if avg_left_split_score < avg_right_split_score:
            right_split_temp.append(node)
        else:
            left_split_temp.append(node)

    right_split += right_split_temp
    left_split += left_split_temp

    # Add character, state that split occurred to already considered mutations
    considered.add((str(character), state))
    G = nx.DiGraph()
    #splitter = str(character) + " " + str(state) + " (" + uniq + ")"
    splitter = root

    # Recursively build left side of network (ie side that did not mutation at the character with the specific state)
    G.add_node(splitter)
    left_subproblems = []
    left_network = None
    if len(left_split) != 0:
        left_root = root_finder(left_split)
        # if left_root not in left_split and left_root in targets:
        # 	left_root = left_root + "_unique"

        left_network, left_subproblems = greedy_build(left_split,
                                                      priors,
                                                      cutoff,
                                                      considered.copy(),
                                                      uniq + "0",
                                                      targets=targets)

        left_nodes = [
            node for node in left_network.nodes()
            if left_network.in_degree(node) == 0
        ]
        dup_dict = {}
        for n in left_network:
            if n in list(G.nodes()) and n != left_root:
                dup_dict[n] = n + "_" + str(
                    hashlib.md5(left_root.encode('utf-8')).hexdigest())
        left_network = nx.relabel_nodes(left_network, dup_dict)
        G = nx.compose(G, left_network)
        if root != left_root:
            G.add_edge(splitter, left_root, weight=0, label="None")

    # Recursively build right side of network
    right_network, right_subproblems = greedy_build(right_split,
                                                    priors,
                                                    cutoff,
                                                    considered.copy(),
                                                    uniq + "1",
                                                    targets=targets)
    right_nodes = [
        node for node in right_network.nodes()
        if right_network.in_degree(node) == 0
    ]
    right_root = root_finder(right_split)

    dup_dict = {}
    for n in right_network:
        if n in list(G.nodes()) and n != right_root:
            dup_dict[n] = n + "_" + str(
                hashlib.md5(right_root.encode('utf-8')).hexdigest())
    for n in dup_dict:
        rename_dict = {n: dup_dict[n]}
        if right_network.out_degree(n) != 0:
            right_network = nx.relabel_nodes(right_network, rename_dict)
        else:
            rename_dict = {n: dup_dict[n]}
            G = nx.relabel_nodes(G, rename_dict)

    G = nx.compose(G, right_network)
    # if right_root not in right_split and right_root in targets:
    # 	right_root = right_root + "_unique"
    #for node in right_nodes:
    if root != right_root:
        if not priors:
            G.add_edge(splitter,
                       right_root,
                       weight=1,
                       label=str(character) + ": 0 -> " + str(state))
        else:
            G.add_edge(splitter,
                       right_root,
                       weight=-np.log(priors[int(character)][state]),
                       label=str(character) + ": 0 -> " + str(state))

    return G, left_subproblems + right_subproblems
Example#51
0
def predict(data_path, predict_path, model_name):
    # Read the parameters of the trained model
    #net, n_features, n_classes = load_model_txt(model_name)

    # Load the trained model
    #trained_net, config = models.get_model_and_config(net)
    #model = trained_net(n_features,
    #                    n_classes,
    #                    *config['extra_args'])
    #model_path = 'models/' + model_name + '/model.pth'
    #model.load_state_dict(torch.load(model_path))
    #print(model)

    # Get the list of files for prediction
    #pred_files = [os.path.join(data_path, line.rstrip()) for line in open(os.path.join(data_path, predict_path))]
    path = "data/Private/RTX/anno"
    parent_path = os.path.split(path)
    bbox_path = parent_path[0]+'/bboxes'
    if not os.path.exists(bbox_path):
        os.makedirs(bbox_path)

    files = glob.glob(os.path.join(path, '*.gpickle'))
    for file in files:

        print(file)
        path, file_name = os.path.split(file)
        print(path)
        print(file_name)

        # Convert the gpickle file to a dgl graph
        dgl_g = graph_utils.convert_gpickle_to_dgl_graph(file)
        # Get the features from the given graph
        nxg = nx.read_gpickle(file)
        features = graph_utils.chris_get_features(nxg)

        '''model.eval()
        with torch.no_grad():
            logits = model(dgl_g, features)
            _, predictions = torch.max(logits, dim=1)
            predictions = predictions.numpy()'''

        # Get positions
        nxg = nx.read_gpickle(file)
        positions = nx.get_node_attributes(nxg, 'pos')
        positions = list(positions.values())

        # Plot graph
        ''''fig2 = plt.figure(dpi=150)
        fig2.clf()
        ax = fig2.subplots()
        inst_predictions = [0] * nxg.number_of_nodes()
        draw(inst_predictions, ax, nxg, positions)'''

        # Plot graph with predictions
        '''fig1 = plt.figure(dpi=150)
        fig1.clf()
        ax = fig1.subplots()
        draw(predictions, ax, nxg, positions)'''

        # Get labels
        labels = nx.get_node_attributes(nxg, 'label')
        labels = np.array(list(labels.values()))

        # Plot annotated graph
        fig2 = plt.figure(dpi=150)
        fig2.clf()
        ax = fig2.subplots()
        draw(labels, ax, nxg, positions)

        # Perform graph morphology closing
        #predictions_alt = predictions
        # predictions_alt = post_processing(nxg, predictions)

        # Extract door nodes
        sub_nxg = instancing(nxg, labels)

        # Separate disjoint graphs (instancing)
        disjoint_sub_graphs = []
        for c in nx.connected_components(sub_nxg):
            disjoint_sub_graphs.append(sub_nxg.subgraph(c))

        clustered_disjoint_sub_graphs = []
        for graph in disjoint_sub_graphs:
            sub_positions = nx.get_node_attributes(graph, 'pos')
            sub_positions = np.array(list(sub_positions.values()))
            clustering = DBSCAN(eps=1100, min_samples=1).fit(sub_positions)
            cluster_labels = clustering.labels_
            graph_keys = list(graph.nodes)  # avoids poking at the subgraph view's internals
            for cluster_label in list(set(cluster_labels)):
                indices = []
                for idx, label in enumerate(cluster_labels):
                    if label == cluster_label:
                        indices.append(graph_keys[idx])
                sub_graph = graph.subgraph(indices)
                clustered_disjoint_sub_graphs.append(sub_graph)

        # Remove graphs not meeting conditions
        min_nr_nodes = 8
        selected_graphs = []
        area_list = []
        width_list = []
        height_list = []
        ratio_list = []

        for disjoint_sub_graph in clustered_disjoint_sub_graphs:
            if disjoint_sub_graph.number_of_nodes() > min_nr_nodes:
                selected_graphs.append(disjoint_sub_graph)
                tmp_positions = nx.get_node_attributes(disjoint_sub_graph, 'pos')
                tmp_positions = np.array(list(tmp_positions.values()))
                area, width, height, ratio = bounding_box_params(tmp_positions)
                area_list.append(area)
                width_list.append(width)
                height_list.append(height)
                ratio_list.append(ratio)

        seleted_graphs_joined = nx.Graph()

        for idx, graph in enumerate(selected_graphs):
            nx.set_node_attributes(graph, [], 'instance')
            for node in graph.nodes:
                graph.nodes[node]['instance'] = idx
            seleted_graphs_joined = nx.compose(seleted_graphs_joined, graph)
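        # Note: this joined graph is rebuilt below once outliers have been
        # rejected, so this first pass is effectively discarded.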

        inliers = reject_outliers_hardcoded(area_list, width_list, height_list, ratio_list)
        selected_graphs = [selected_graphs[i] for i in inliers]

        print('Numer of doors: %d' % len(selected_graphs))

        seleted_graphs_joined = nx.Graph()

        for idx, graph in enumerate(selected_graphs):
            nx.set_node_attributes(graph, [], 'instance')
            for node in graph.nodes:
                graph.nodes[node]['instance'] = idx
            seleted_graphs_joined = nx.compose(seleted_graphs_joined, graph)

        # Determine bbox
        list_bboxes, list_gen_bboxes = determine_bboxes(selected_graphs)
        print(list_bboxes)
        x_positions, y_positions = zip(*positions)
        x_min = min(x_positions)
        x_max = max(x_positions)
        y_min = min(y_positions)
        y_max = max(y_positions)

        norm_bboxes = normalize_bboxes(list_bboxes, x_min, x_max, y_min, y_max)
        print(norm_bboxes)
        norm_bboxes_inv_y = [[box[0], 1-box[1], box[2], 1-box[3]] for box in norm_bboxes]
        print(norm_bboxes_inv_y)
        fig_norm_boxes = plt.figure(dpi=150)
        fig_norm_boxes.clf()
        ax = fig_norm_boxes.subplots()
        for box in norm_bboxes_inv_y:

            rect = patches.Rectangle((box[2], box[1]), box[0]-box[2], box[3]-box[1], linewidth=1, edgecolor='r', facecolor='none')
            ax.add_patch(rect)

        bboxes_filename = file_name.replace('_w_annotations.gpickle', '_boxes.txt')
        bboxes_image_filename = file_name.replace('_w_annotations.gpickle', '_boxes_image_format.txt')

        with open(os.path.join(bbox_path, bboxes_filename), 'w') as f:
            for item in norm_bboxes:
                f.write("%s\n" % ' '.join(str(v) for v in item))

        with open(os.path.join(bbox_path, bboxes_image_filename), 'w') as f:
            for item in norm_bboxes_inv_y:
                f.write("%s\n" % ' '.join(str(v) for v in item))

        # Plot graph with generalized doors
        '''pos = nx.get_node_attributes(nxg, 'pos')
Example#52
0
 def plan(self,
          component_name,
          start_conf,
          goal_conf,
          obstacle_list=[],
          otherrobot_list=[],
          ext_dist=2,
          rand_rate=70,
          max_iter=1000,
          max_time=15.0,
          smoothing_iterations=17,
          animation=False):
     """
     :return: [path, all_sampled_confs]
     author: weiwei
     date: 20201226
     """
     self.roadmap.clear()
     self.roadmap_start.clear()
     self.roadmap_goal.clear()
     self.start_conf = start_conf
     self.goal_conf = goal_conf
     # check start_conf and goal_conf
     if self._is_collided(component_name, start_conf, obstacle_list, otherrobot_list):
         print("The start robot_s configuration is in collision!")
         return None
     if self._is_collided(component_name, goal_conf, obstacle_list, otherrobot_list):
         print("The goal robot_s configuration is in collision!")
         return None
     if self._goal_test(conf=start_conf, goal_conf=goal_conf, threshold=ext_dist):
         return [[start_conf, goal_conf], None]
     self.roadmap_start.add_node('start', conf=start_conf, cost=0)
     self.roadmap_goal.add_node('goal', conf=goal_conf, cost=0)
     tic = time.time()
     tree_a = self.roadmap_start
     tree_b = self.roadmap_goal
     tree_a_goal_conf = self.roadmap_goal.nodes['goal']['conf']
     tree_b_goal_conf = self.roadmap_start.nodes['start']['conf']
     for _ in range(max_iter):
         toc = time.time()
         if max_time > 0.0:
             if toc - tic > max_time:
                 print("Too much motion time! Failed to find a path.")
                 return None
         # Random Sampling
         rand_conf = self._sample_conf(component_name=component_name,
                                       rand_rate=100,
                                       default_conf=None)
         last_nid = self._extend_roadmap(component_name=component_name,
                                         roadmap=tree_a,
                                         conf=rand_conf,
                                         ext_dist=ext_dist,
                                         goal_conf=tree_a_goal_conf,
                                         obstacle_list=obstacle_list,
                                         otherrobot_list=otherrobot_list,
                                         animation=animation)
         if last_nid != -1:  # not trapped:
             goal_nid = last_nid
             tree_b_goal_conf = tree_a.nodes[goal_nid]['conf']
             last_nid = self._extend_roadmap(component_name=component_name,
                                             roadmap=tree_b,
                                             conf=tree_a.nodes[last_nid]['conf'],
                                             ext_dist=ext_dist,
                                             goal_conf=tree_b_goal_conf,
                                             obstacle_list=obstacle_list,
                                             otherrobot_list=otherrobot_list,
                                             animation=animation)
             if last_nid == 'connection':
                 self.roadmap = nx.compose(tree_a, tree_b)
                 self.roadmap.add_edge(last_nid, goal_nid)
                 break
             elif last_nid != -1:
                 goal_nid = last_nid
                 tree_a_goal_conf = tree_b.nodes[goal_nid]['conf']
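         # Swap the two trees so the smaller one is extended next
         # (the balancing heuristic of bidirectional RRT-Connect).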
         if tree_a.number_of_nodes() > tree_b.number_of_nodes():
             tree_a, tree_b = tree_b, tree_a
             tree_a_goal_conf, tree_b_goal_conf = tree_b_goal_conf, tree_a_goal_conf
     else:
         print("Reach to maximum iteration! Failed to find a path.")
         return None
     path = self._path_from_roadmap()
     smoothed_path = self._smooth_path(component_name=component_name,
                                       path=path,
                                       obstacle_list=obstacle_list,
                                       otherrobot_list=otherrobot_list,
                                       granularity=ext_dist,
                                       iterations=smoothing_iterations,
                                       animation=animation)
     return smoothed_path
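
A hypothetical call of the planner above (the RRTConnect class name and the
robot handle are placeholders, not confirmed by this snippet):

planner = RRTConnect(robot_s)  # assumed constructor; only plan() is shown above
path = planner.plan(component_name='arm',
                    start_conf=start_conf,
                    goal_conf=goal_conf,
                    ext_dist=2,
                    max_time=15.0)
if path is not None:
    print('Found a path with %d waypoints.' % len(path))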
Example #53
0
def wattsstrogatz_graph(args):
    '''Decent small-world model with low diameter and high clustering.
    https://en.wikipedia.org/wiki/Watts%E2%80%93Strogatz_model
    '''
    # Unpack args
    n = args[0]
    k = args[1]
    diameter_goal = args[2]
    rng = args[3]

    # Seed rng
    random.seed(rng)

    g = nx.Graph()
    # Keep track of what edges we will randomly choose from.
    # Start with a complete list of all possible edges.
    log.info("Watts-Strogatz generation requires a complete graph.")
    remaining_edges = import_graph('complete', [n])

    # Generate a ring graph with n nodes, k neighbors
    log.info("Watts-Strogatz generation requires a ring graph.")
    ring_graph = import_graph('ring', [n, k])

    # Remove the ring graph edges from our complete set of remaining edges to choose from
    remaining_edges.remove_edges_from(ring_graph.edges())
    remaining_edges = np.asarray(remaining_edges.edges()).tolist()

    # Clone the ring_graph to a separate, independent graph g
    g = nx.compose(g, ring_graph)
    ring_graph = np.asarray(ring_graph.edges()).tolist()

    # Initializations for edge count statistics
    edges_rewired_count = 0
    edges_total = len(ring_graph)

    current_diameter = nx.diameter(g)
    edges_last_recalculation = 0
    # While the diameter is bigger than our goal...
    while current_diameter > diameter_goal:
        # Choose one random edge from our ring graph to replace with one random edge from our list of remaining edges
        choice = random.choice(remaining_edges)
        edge_removed = random.choice(ring_graph)

        # Replace the edge in our graph g
        g.remove_edge(edge_removed[0], edge_removed[1])
        g.add_edge(choice[0], choice[1])

        # Remove the randomly chosen edges from our lists so we don't pick them again
        ring_graph.remove(edge_removed)
        remaining_edges.remove(choice)

        # Increment edges-rewired count
        edges_rewired_count += 1

        # Recalculate diameter only if % edges rewired changes by at least 1%.
        # TODO(jordan): Dynamically determine this value by the last calculated diameter?
        if ((edges_rewired_count - edges_last_recalculation) / edges_total >=
                0.01):
            current_diameter = nx.diameter(g)
            edges_last_recalculation = edges_rewired_count

            log.debug(
                'Watts-Strogatz Graph Generation: %.2f%% (%d of %d: diameter = %d)'
                % (100 * edges_rewired_count / edges_total,
                   edges_rewired_count, edges_total, current_diameter))

    # Log statistics (how many edges needed to be rewired for this diameter, mCC)
    log.info(
        'Watts-Strogatz Graph Generation: Took %.2f%% (%d of %d) of edges being rewired to reach a diameter of %d.'
        % (100 * edges_rewired_count / edges_total, edges_rewired_count,
           edges_total, diameter_goal))
    log.info(
        'Watts-Strogatz Graph Generation: Mean clustering coefficient: %.4f.' %
        (nx.average_clustering(g)))

    return (g)
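
A hypothetical call, matching the positional argument unpacking at the top of
wattsstrogatz_graph (the concrete values are placeholders):

g = wattsstrogatz_graph([200,  # n: number of nodes
                         10,   # k: neighbors in the initial ring
                         4,    # diameter_goal: rewire until diameter <= 4
                         42])  # rng: seed passed to random.seed()
print(nx.diameter(g), nx.average_clustering(g))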
Example #54
0
def rollingCPM(dynNetSN, k=3, runningTime=False):

    #print("computing PALLA with k: "+str(k))
    DynCom = dynamicCommunitiesSN()
    old_communities = None
    old_graph = nx.Graph()
    lastcid = 0
    tabDurations = []
    graphs = dynNetSN.snapshots()

    start_time = time.time()

    for (date, graph) in graphs.items():
        #print("--- t:"+str(date))
        start = timer()
        communitiesAtT = list(
            _get_percolated_cliques(graph, k)
        )  #get the percolated cliques (communities) as a list of set of nodes
        for c in communitiesAtT:
            DynCom.addCommunity(date, c)

        if old_communities is None:  # if first snapshot
            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT

        else:
            #communities = {res[idc-lastcid]: idc for idc in range(lastcid, lastcid+len(res))} #associate new IDs to com
            if len(communitiesAtT) > 0:  #if there is at least one community
                union_graph = nx.compose(
                    old_graph, graph
                )  #create the union graph of the current and the previous
                communities_union = list(
                    _get_percolated_cliques(
                        union_graph,
                        k))  #get the communities of the union graph
                #communities_union = {res2[idc-lastcid]: idc for idc in range(lastcid, lastcid+len(res2))} #assign new IDs to coms of union graph

                #jaccardBeforeAndUnion = _jaccard_similarity(old_communities, communities_union,threashold=0.1) #we only care if the value is above 0
                #jaccardUnionAndAfter = _jaccard_similarity(communitiesAtT,communities_union,threashold=0.1) #we only care if the value is above 0
                jaccardBeforeAndUnion = _included(
                    old_communities,
                    communities_union)  #we only care if the value is above 0
                jaccardUnionAndAfter = _included(
                    communitiesAtT,
                    communities_union)  #we only care if the value is above 0

                for c in jaccardBeforeAndUnion:  #for each community in the union graph
                    matched = []
                    born = []
                    killed = []

                    allJaccards = set()
                    for oldC in jaccardBeforeAndUnion[c]:
                        for newC in jaccardUnionAndAfter[c]:
                            allJaccards.add(
                                ((oldC, newC), _singleJaccard(oldC, newC))
                            )  #compute jaccard between candidates before and after
                    allJaccards = sorted(allJaccards,
                                         key=itemgetter(1),
                                         reverse=True)
                    sortedMatches = [k[0] for k in allJaccards]
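                    # Greedy matching: repeatedly take the pair with the
                    # highest Jaccard and discard every other pair sharing a
                    # community with it (the while loop below).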

                    oldCToMatch = dict(
                        jaccardBeforeAndUnion[c])  #get all coms before
                    newCToMatch = dict(
                        jaccardUnionAndAfter[c])  #get all new coms
                    while len(
                            sortedMatches
                    ) > 0:  #as long as there are couples of unmatched communities
                        matchedKeys = sortedMatches[
                            0]  #pair of communities of highest jaccard
                        matched.append(matchedKeys)  #this pair will be matched

                        del oldCToMatch[matchedKeys[
                            0]]  #delete chosen com from possible to match
                        del newCToMatch[matchedKeys[1]]
                        sortedMatches = [
                            k for k in sortedMatches
                            if len(set(matchedKeys) & set(k)) == 0
                        ]  #keep only pairs of unmatched communities

                    if len(oldCToMatch) > 0:
                        killed.append(list(oldCToMatch.keys())[0])
                    if len(newCToMatch) > 0:
                        born.append(list(newCToMatch.keys())[0])

                    #print("checking",matched,killed,born,jaccardUnionAndAfter[c])
                    for aMatch in matched:
                        #print("check continue ",DynCom.getID(dateOld,aMatch[0]),DynCom.getID(date,aMatch[1]))
                        DynCom.addEvent(
                            (dateOld, DynCom.getID(dateOld, aMatch[0])),
                            (date, DynCom.getID(date, aMatch[1])), dateOld,
                            date, "continue")

                    for kil in killed:  # these are actual merges (unmatched old communities are "merged" into new ones)
                        for com in jaccardUnionAndAfter[c]:
                            #print("merge",kil,DynCom.getID(dateOld,kil),"=>",com,DynCom.getID(date,com))
                            #print("because",c)
                            #print("oups",jaccardBeforeAndUnion)
                            DynCom.addEvent(
                                (dateOld, DynCom.getID(dateOld, kil)),
                                (date, DynCom.getID(date, com)), dateOld, date,
                                "merged")

                    for b in born:  # these are actual splits (unmatched new communities are "split" from existing ones)
                        for com in jaccardBeforeAndUnion[c]:
                            DynCom.addEvent(
                                (dateOld, DynCom.getID(dateOld, com)),
                                (date, DynCom.getID(date, b)), dateOld, date,
                                "split")

            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT

    duration = (time.time() - start_time)
    DynCom.relabelComsFromContinuousEvents()

    if runningTime:
        return duration
    return (DynCom)
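
A hypothetical call; dynNetSN is assumed to expose snapshots() as an ordered
{date: nx.Graph} mapping, as the loop above requires:

dyn_coms = rollingCPM(my_dynamic_network, k=4)  # my_dynamic_network is a placeholder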
Example #55
0
def division(path="./"):
    g1 = networkx.DiGraph()  # create direct graph
    g2 = networkx.DiGraph()  # create direct graph
    g3 = networkx.DiGraph()  # create direct graph
    files_to_parse = graph.get_python_files(path)  # only python files
    allFunctions = []
    fun_dic = {}
    tmpFunctions = []
    # creating nodes
    for file in files_to_parse:
        allFunctions += graph.get_functions_names_from_file(path + "/" + file)
        tmpFunctions = graph.get_functions_names_from_file(path + "/" + file)
    #    for fun in tmpFunctions:
    #       g.add_node(fun, waga=graph.count_method_size(path + "/" + file, fun))
    # creating edges
    i = 1
    for file in files_to_parse:
        for fun in allFunctions:
            if fun != "":

                for otherFun in allFunctions:
                    methodCount = graph.count_method(path + "/" + file, fun,
                                                     otherFun)
                    if methodCount > 0:
                        #g.add_edge(fun, otherFun, weight=methodCount)
                        if otherFun != "" and otherFun != fun:
                            dic = {i: (fun, otherFun, {"nr": methodCount})}
                            fun_dic.update(dic)
                            i = i + 1
    print(fun_dic.items())
    i = 1
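    # Distribute the call pairs round-robin over g1, g2 and g3, strongest
    # remaining pair first (get_max is assumed to pop the entry with the
    # largest call count from fun_dic).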
    while fun_dic:
        x, f1, f2 = get_max(fun_dic)
        print(x, f1, f2)
        if i == 1:
            g1.add_node(f1)
            g1.add_node(f2)
            g1.add_edge(f1, f2, weight=x)
        if i == 2:
            g2.add_node(f1)
            g2.add_node(f2)
            g2.add_edge(f1, f2, weight=x)
        if i == 3:
            g3.add_node(f1)
            g3.add_node(f2)
            g3.add_edge(f1, f2, weight=x)
        i = i + 1
        if i == 4:
            i = 1
    ##  labels copied from other graph
    # matplotlib.pyplot.figure()
    pos1 = networkx.spring_layout(g1, center=[2, 1])
    pos2 = networkx.spring_layout(g2, center=[3, 2])
    pos3 = networkx.spring_layout(g3, center=[4, 3])
    color_map1 = []
    color_map2 = []
    color_map3 = []
    for node in g1:
        color_map1.append("green")
    #networkx.draw(g1, pos1, node_color=color_map1, with_labels=True, font_weight='bold')
    for node in g2:
        color_map2.append("red")
    #networkx.draw(g2, pos2, node_color=color_map2, with_labels=True, font_weight='bold')
    for node in g3:
        color_map3.append("yellow")
    #networkx.draw(g3, pos3, node_color=color_map3, with_labels=True, font_weight='bold')

    g4 = networkx.compose(g1, g2)
    g5 = networkx.compose(g4, g3)
    pos5 = networkx.spring_layout(g5, center=[1, 1])
    networkx.draw(g5, pos5, with_labels=True, font_weight="bold")
    pos_attr1 = {}
    pos_attr2 = {}
    pos_attr3 = {}
    for node, coords in pos1.items():
        pos_attr1[node] = (coords[0], coords[1] + 0.07)
    for node, coords in pos2.items():
        pos_attr2[node] = (coords[0], coords[1] + 0.07)
    for node, coords in pos3.items():
        pos_attr3[node] = (coords[0], coords[1] + 0.07)

    node_attr1 = networkx.get_node_attributes(g1, 'waga')
    node_attr2 = networkx.get_node_attributes(g2, 'waga')
    node_attr3 = networkx.get_node_attributes(g3, 'waga')
    custom_node_attrs1 = {}
    custom_node_attrs2 = {}
    custom_node_attrs3 = {}
    for node, attr in node_attr1.items():
        custom_node_attrs1[node] = str(attr)
    for node, attr in node_attr2.items():
        custom_node_attrs2[node] = str(attr)
    for node, attr in node_attr3.items():
        custom_node_attrs3[node] = str(attr)

    edge_labels1 = dict([((u, v), d['weight'])
                         for u, v, d in g1.edges(data=True)])
    edge_labels2 = dict([((u, v), d['weight'])
                         for u, v, d in g2.edges(data=True)])
    edge_labels3 = dict([((u, v), d['weight'])
                         for u, v, d in g3.edges(data=True)])

    networkx.draw_networkx_edge_labels(g1, pos1, edge_labels=edge_labels1)
    networkx.draw_networkx_edge_labels(g2, pos2, edge_labels=edge_labels2)
    networkx.draw_networkx_edge_labels(g3, pos3, edge_labels=edge_labels3)

    networkx.draw_networkx_labels(g1, pos_attr1, labels=custom_node_attrs1)
    networkx.draw_networkx_labels(g2, pos_attr2, labels=custom_node_attrs2)
    networkx.draw_networkx_labels(g3, pos_attr3, labels=custom_node_attrs3)


#lf = graph.loadFolder()
#division(lf)
#matplotlib.pyplot.show()
Example #56
0
    def _load_graph(self, zeep_relations, zeep_objects, add2self=True):
        new_graph = ResnetGraph()
        # loading entities and their properties
        id2entity = self._zeep2psobj(zeep_objects)
        new_graph.add_nodes_from([(k, v.items())
                                  for k, v in id2entity.items()])

        if zeep_relations is not None:
            new_relations = dict()
            for rel in zeep_relations.Objects.ObjectRef:
                ps_rel = PSRelation.from_zeep(rel)
                rel_id = rel['Id']
                new_relations[rel_id] = ps_rel

            # loading relations and their properties
            for prop in zeep_relations.Properties.ObjectProperty:
                rel_id = prop['ObjId']
                prop_id = prop['PropId']
                prop_set_id = prop['PropSet']
                prop_name = prop['PropName']
                prop_display_name = prop['PropDisplayName']
                values = prop['PropValues']['string']

                if not self.IdToPropType[prop_id]['IsMultiple']:
                    new_relations[rel_id][prop_id] = values
                    new_relations[rel_id][prop_name] = values
                    new_relations[rel_id][prop_display_name] = values
                elif prop_set_id in new_relations[rel_id].PropSetToProps.keys(
                ):
                    new_relations[rel_id].PropSetToProps[prop_set_id][
                        prop_id] = values
                    new_relations[rel_id].PropSetToProps[prop_set_id][
                        prop_name] = values
                    new_relations[rel_id].PropSetToProps[prop_set_id][
                        prop_display_name] = values
                else:
                    new_relations[rel_id].PropSetToProps[prop_set_id] = {
                        prop_id: values,
                        prop_name: values,
                        prop_display_name: values
                    }

            # loading connected entities from Links
            for l in zeep_relations.Links.Link:
                rel_id = l['RelationId']
                direction = l['Dir']
                link = (l['EntityId'], direction, l['Effect'])

                if direction == 1:
                    if len(new_relations[rel_id].Nodes) < 2:
                        new_relations[rel_id].Nodes['Targets'] = [link]
                    else:
                        new_relations[rel_id].Nodes['Targets'].append(link)
                else:
                    if len(new_relations[rel_id].Nodes) < 1:
                        new_relations[rel_id].Nodes['Regulators'] = [link]
                    else:
                        new_relations[rel_id].Nodes['Regulators'].append(link)

            try:
                new_relations[rel_id].Nodes['Targets'].sort(key=self.link_id)
            except KeyError:
                pass

            try:
                new_relations[rel_id].Nodes['Regulators'].sort(
                    key=self.link_id)
            except KeyError:
                pass

            for rel in new_relations.values():
                regulator_target = rel.get_regulators_targets()
                for pair in regulator_target:
                    try:
                        ref_count = rel['RelationNumberOfReferences'][0]
                    except KeyError:
                        ref_count = 0
                    new_graph.add_edge(pair[0],
                                       pair[1],
                                       relation=rel,
                                       weight=float(ref_count))
                    # print (newGraph.get_edge_data(pair[0], pair[1]))

            self.IDtoRelation.update(
                new_relations
            )  # must be kept since Resnet relation may not be binary

        if add2self:
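            # Optionally accumulate the freshly loaded subgraph into the
            # session-wide graph held by this object.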
            self.Graph = nx.compose(self.Graph, new_graph)

        return new_graph
Example #57
0
    nx.draw_networkx_edges(subgraph, _pos, alpha=0.3, edge_color="k")
    node_color_list_c = [nc for _, nc in subgraph.nodes(data="node_color")]
    nx.draw_networkx_nodes(subgraph, _pos, node_color=node_color_list_c)
    nx.draw_networkx_labels(subgraph, _pos, font_size=14)
    plt.axis("off")
    plt.title("One of the subgraphs.")
    plt.show()

###############################################################################
# Put the graph back from the list of subgraphs
# ---------------------------------------------
#
G_ex_r = nx.DiGraph()
# Composing all subgraphs.
for subgraph in subgraphs_of_G_ex:
    G_ex_r = nx.compose(G_ex_r, subgraph)
# Adding the previously stored edges.
G_ex_r.add_edges_from(removed_edges.edges())
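# compose only restores the edges inside each subgraph; the edges that ran
# between subgraphs were stored in removed_edges and are re-added above.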

###############################################################################
# Check that the original graph and the reconstructed graphs are isomorphic.
# --------------------------------------------------------------------------
#
assert nx.is_isomorphic(G_ex, G_ex_r)

###############################################################################
# Plot the reconstructed graph.
# -----------------------------
#
node_color_list = [nc for _, nc in G_ex_r.nodes(data="node_color")]
pos = nx.spectral_layout(G_ex_r)
Example #58
0
from tool.util import *
import networkx as nx
import random

keyword_list = get_key_list2()
rg = nx.Graph()
r_dirr = r"D:\semantic analysis\新结果\去虚词去单字\合成共现网络\常用词年份//"
for key_word in keyword_list:
    dirr = r"D:\semantic analysis\新结果\去虚词去单字\总网络\{0}\p//".format(key_word)

    print(key_word)
    g_list = get_file_list(dirr, ".pkl")
    os.chdir(dirr)
    for i, g in enumerate(g_list):
        # if nn < len(g_list)-1:
        #     rg = nx.compose(rg, get_nw(g))
        #     nn += 1
        #     print(g)
        # else:
        #     save_nw(rg, r_dirr+g)
        #     nn = 0
        #     rg = get_nw(g_list[i])
        #     print("save")
        rg = nx.compose(rg, get_nw(g))
        print(g)

save_nw(rg, r_dirr + "总网络.pkl")
print("save")
# print(len(get_nw(g).nodes()))
Example #59
0
"""
@description To read all graphml files from the current directory and stack them into one single graphml file

"""

import glob
import os
import networkx as nx
import sys

cwd = os.getcwd()
print('Your current directory is {}'.format(cwd))
graphml = glob.glob(cwd+"/*.graphml")
print('You have {} graphml files.'.format(len(graphml)))

# Initialise a directed graph and compose it
# with graph files read in the directory

print('\nStacking or composing graphs begins.....')
G = nx.DiGraph()
for item in graphml:
	try:
		sub_g = nx.read_graphml(item)
		G = nx.compose(G, sub_g)
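		# nx.compose keeps the union of nodes and edges; where they overlap,
		# attributes from the second argument (sub_g) take precedence.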
	except TypeError:
		sys.stderr.write("Erroneous graphml\n")
		sys.exit(1)

nx.write_graphml(G, 'all_nodes.graphml')
print('Stacking is completed!')

Example #60
0
def create_diffusion_graph(twitter_corpus_file, diffusion_graph_file):

    diffusion_graph_dir = '/'.join(diffusion_graph_file.split('/')[:-1]) + '/'

    #initialize graph
    G = nx.DiGraph()

    for v in institutions['URL'].tolist():
        G.add_edge(v, graph_nodes['institution'])

    for v in repositories['URL'].tolist():
        G.add_edge(v, graph_nodes['repository'])

    G.add_edge(graph_nodes['institution'], graph_nodes['source'])
    G.add_edge(graph_nodes['repository'], graph_nodes['source'])

    epoch = 0
    frontier = []
    connected_components = 0
    last_pass = False
    while True:

        #expand graph
        if not os.path.exists(diffusion_graph_dir + 'epoch_' + str(epoch) +
                              '.tsv'):
            graph_epoch_n(frontier, epoch, last_pass, twitter_corpus_file,
                          diffusion_graph_dir)

        df = pd.read_csv(diffusion_graph_dir + 'epoch_' + str(epoch) + '.tsv',
                         sep='\t').dropna()
        G = nx.compose(
            G,
            nx.from_pandas_edgelist(df,
                                    source='source_url',
                                    target='target_url',
                                    create_using=nx.DiGraph()))
        frontier = [x for x in G.nodes() if G.out_degree(x) == 0]

        print('Epoch:', epoch)
        print('Connected Components:',
              nx.number_connected_components(G.to_undirected()))
        print('Frontier Size:', len(frontier))

        if last_pass:
            break

        #last pass condition
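        # Trigger one final pass when the relative drop in connected
        # components since the previous epoch falls below components_ratio.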
        if epoch != 0 and (connected_components -
                           nx.number_connected_components(G.to_undirected())
                           ) / connected_components < components_ratio:
            last_pass = True
        connected_components = nx.number_connected_components(
            G.to_undirected())
        epoch += 1

    #add root node
    df = pd.read_csv(diffusion_graph_dir + 'epoch_0.tsv', sep='\t').dropna()
    df['social'] = project_url + '#twitter'
    G = nx.compose(
        G,
        nx.from_pandas_edgelist(df,
                                source='social',
                                target='source_url',
                                create_using=nx.DiGraph()))

    write_graph(G, diffusion_graph_file)