def fix_connectivity_of_network(initial_g, pos, tx, threshold=80.0):
    g = initial_g
    x = [(len(c), c) for c in nx.connected_components(g)]
    maximum = max(c[0] for c in x)
    if maximum < len(pos)*threshold/100:
        return False
    while len(x) > 1:
        x.sort(key=lambda a: a[0])
        sizes = [c[0] for c in x]
        # print len(x), " components with sizes: ", sizes
        idx_c = 0
        c = x[0]
        # print "\thola: ", c[1]
        ttt = [ find_closest(pos, x, idx, idx_c) for idx in c[1] ]
        ttt.sort(key=lambda a: a[0])
        pair = ttt[0]
        # print "\t\tminimum distance : ", pair[0] - tx, ", to node:", pair[1]
        toward = pair[1]
        x0 = pos[pair[2]][0]
        y0 = pos[pair[2]][1]
        x1 = pos[toward][0]
        y1 = pos[toward][1]
        d = pair[0] - tx + 1
        px = (x1 - x0)/d
        py = (y1 - y0)/d
        # move all nodes in this component
        for idx in c[1]:
            pos[idx][0] += px
            pos[idx][1] += py
        g = build_graph(pos, tx)
        x = [(len(c), c) for c in nx.connected_components(g)]
    return True
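
The early return above gives up unless the largest component already spans threshold percent of the nodes. A minimal, self-contained sketch of that check (a toy geometric graph stands in for build_graph(pos, tx); only networkx is assumed):

import networkx as nx

g = nx.random_geometric_graph(50, 0.15, seed=1)   # toy stand-in for build_graph(pos, tx)
largest = max(len(c) for c in nx.connected_components(g))
threshold = 80.0
print("largest component: %d of %d nodes" % (largest, g.number_of_nodes()))
print("repairable: %s" % (largest >= g.number_of_nodes() * threshold / 100))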
Example #2
File: gn.py  Project: liupenggl/hybrid
def runGirvanNewman(G, Orig_deg, m_):
    #let's find the best split of the graph    
    BestQ = 0.0
    Q = 0.0
    Bestcomps = list(nx.connected_components(G))
    while True:    
        CmtyGirvanNewmanStep(G)
        Q = _GirvanNewmanGetModularity(G, Orig_deg, m_);
        #print "current modularity: %f" % Q
        if Q > BestQ:
            BestQ = Q
            Bestcomps = list(nx.connected_components(G))    #Best Split
            #print "comps:" 
            #print Bestcomps
        if G.number_of_edges() == 0:
            #print "at last use this break"
            break
    """
    if BestQ > 0.0:
        print "Best Q: %f" % BestQ
        print Bestcomps
    else:
        print "Best Q: %f" % BestQ
    """
    return Bestcomps
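
CmtyGirvanNewmanStep and _GirvanNewmanGetModularity are project helpers not shown on this page. A self-contained sketch of the same best-split search, using NetworkX's own edge betweenness and community modularity helper (NetworkX 2.x) on the karate club graph:

import networkx as nx
from networkx.algorithms.community import modularity

G = nx.karate_club_graph()
H = G.copy()                                     # working copy that loses edges
best_q, best_comps = -1.0, [set(G)]
while H.number_of_edges() > 0:
    bw = nx.edge_betweenness_centrality(H)       # one Girvan-Newman step:
    H.remove_edge(*max(bw, key=bw.get))          # drop the highest-betweenness edge
    comps = list(nx.connected_components(H))
    q = modularity(G, comps)                     # score the split against the original graph
    if q > best_q:
        best_q, best_comps = q, comps
print("best Q: %.3f with %d communities" % (best_q, len(best_comps)))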
Example #3
def curation(strange,Z,CV,FV,EV):
	v_,ee_ = TRANS(strange)
	e_ = set(CAT(ee_))
	vv = set(CAT([EV[e] for e in e_]))
	VIEW(STRUCT(AA(POLYLINE)([[EV[e1],EV[e2]] for e1,e2 in ee_])))
	import networkx as nx
	G=nx.Graph()
	G.add_nodes_from(e_)
	G.add_edges_from(ee_)
	nx.connected_components(G)
	edges2remove = []; newEdges = []
	for edges in nx.connected_components(G):
		print edges
		edict = defaultdict(list)
		verts = CAT([EV[e] for e in edges])
		for v in verts: 
			edict[v] += [1]
		newEdge = [v for v in edict.keys() if len(edict[v])==1]
		edges2remove += list(edges)
		newEdges += [newEdge]
	v = 0; 
	W = [None for k in range(len(Z)-len(v_))]
	newindex = [None for k in range(len(Z))]
	for k,vert in enumerate(Z):
		if k not in v_:
			W[v] =  vert
			newindex[k]=v
			v += 1
	CW = [list(set([newindex[v] for v in cv if newindex[v]!=None])) for cv in CV]
	FW = [[newindex[v] for v in fv if newindex[v]!=None] for fv in FV if len(set(fv))>1]
	print "FV = ",FV
	EW = [[newindex[v] for v in ev if newindex[v]!=None] for e,ev in enumerate(EV) if e not in edges2remove]
	EW = [[w1,w2] for w1,w2 in EW if w1!=w2]  + [[newindex[v1],newindex[v2]] for [v1,v2] in newEdges]
	return W,CW,FW,EW
Example #4
def runGirvanNewman(G, Orig_deg, m_):
    #let's find the best split of the graph
    BestQ = 0.0
    Q = 0.0
    print "runGirvanNewman"
    while True:    
        CmtyGirvanNewmanStep(G)
        Q = _GirvanNewmanGetModularity(G, Orig_deg, m_)
        print "current modularity: %f" % Q
        if nx.number_connected_components(G) >= 10 or Q >= 0.5:
            break
        if Q > BestQ:
            BestQ = Q
            Bestcomps = list(nx.connected_components(G))    #Best Split
 #           print "comps:"
 #           print Bestcomps
        if G.number_of_edges() == 0:
            break
    if BestQ > 0.0:
#        print "Best Q: %f" % BestQ
        result_data = {}
        result_data['num_clusters'] = len(Bestcomps)
        result_data['list'] = Bestcomps
        return result_data
    else:
#        print "Best Q: %f" % BestQ
        comps = list(nx.connected_components(G))
        result_data = {}
        result_data['num_clusters'] = len(comps)
        result_data['list'] = comps
        return result_data
def compareGraphs(g1, g2):
    
    """#Compares the quantitative properties of two graph. So I can check the coarse graining. """

    
    #Nodes and edges
    print 'Graph1: #(Nodes, Edges) = (' + str(len(g1.nodes())) + ', ' + str(len(g1.edges())) + ')'
    print 'Graph2: #(Nodes, Edges) = (' + str(len(g2.nodes())) + ', ' + str(len(g2.edges())) + ')'

    #Connected Components
    #print '\n#CCs for graph 1: ' + str(len(nx.connected_components(g1)))
    #print '#CCs for graph 2: ' + str(len(nx.connected_components(g2)))
    
    plt.hist([len(i) for i in nx.connected_components(g1)])
    plt.hist([len(i) for i in nx.connected_components(g2)])
    plt.title('Cluster Size')
    plt.xlabel('Cluster Size')
    plt.ylabel('#Cluster')
    plt.show()
    
    #Degree Distribution
    plt.hist(nx.degree_histogram(g1))
    plt.hist(nx.degree_histogram(g2))
    plt.title('Degree Distribution' )
    plt.xlabel('Degree')
    plt.ylabel('#Nodes')
    plt.show()
    
    #Betweenness --- this is by far the most computationally demanding.
    plt.hist(nx.betweenness_centrality(g1, normalized = False).values())
    plt.hist(nx.betweenness_centrality(g2, normalized = False).values())
    plt.title('Distribution of Betweenness')
    plt.xlabel('Betweenness')
    plt.ylabel('#Nodes')
    plt.show()
Example #6
def betweenness_fracture(infile, outfile, fraction, recalculate = False):
    """
    Removes given fraction of nodes from infile network in reverse order of 
    betweenness centrality (with or without recalculation of centrality values 
    after each node removal) and saves the network in outfile.
    """

    g = networkx.read_gml(infile)
    m = networkx.betweenness_centrality(g)
    l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
    largest_component = max(networkx.connected_components(g), key = len)
    n = len(g.nodes())
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.betweenness_centrality(g)
            l = sorted(m.items(), key = operator.itemgetter(1), 
                       reverse = True)
        largest_component = max(networkx.connected_components(g), key = len)
        if i * 1. / n >= fraction:
            break
    components = networkx.connected_components(g)
    component_id = 1
    for component in components:
        for node in component:
            g.node[node]["component"] = component_id
        component_id += 1
    networkx.write_gml(g, outfile)
Example #7
def closeness(infile, recalculate = False):
    """
    Performs robustness analysis based on closeness centrality,  
    on the network specified by infile using sequential (recalculate = True) 
    or simultaneous (recalculate = False) approach. Returns a list 
    with fraction of nodes removed, a list with the corresponding sizes of 
    the largest component of the network, and the overall vulnerability 
    of the network.
    """

    g = networkx.read_gml(infile)
    m = networkx.closeness_centrality(g)
    l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key = len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    R = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.closeness_centrality(g)
            l = sorted(m.items(), key = operator.itemgetter(1), 
                       reverse = True)
        largest_component = max(networkx.connected_components(g), key = len)
        x.append(i * 1. / n)
        R += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, 0.5 - R / n
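
The same bookkeeping can be reproduced without a GML input file; a self-contained sketch of the simultaneous (recalculate = False) variant on a generated graph, assuming only networkx:

import operator
import networkx as nx

g = nx.erdos_renyi_graph(100, 0.05, seed=7)
n = g.number_of_nodes()
ranked = sorted(nx.closeness_centrality(g).items(),
                key=operator.itemgetter(1), reverse=True)
x, y = [0.0], [len(max(nx.connected_components(g), key=len)) * 1.0 / n]
R = 0.0
for i, (node, _) in enumerate(ranked[:n - 1], start=1):
    g.remove_node(node)                          # simultaneous attack: no recalculation
    s = len(max(nx.connected_components(g), key=len)) * 1.0 / n
    x.append(i * 1.0 / n)
    y.append(s)
    R += s
print("vulnerability (0.5 - R/n): %.3f" % (0.5 - R / n))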
Example #8
    def _reduce_graph(self, graph, min0list):
        """determine how much of the graph to include in the disconnectivity graph
        """
        used_nodes = []
        # make sure we include the subgraph containing min0
        if len(min0list) > 0:
            for min0 in min0list:
                nodes = nx.node_connected_component(graph, min0)
                if len(nodes) > 2:
                    used_nodes += nodes
                else:
                    print("dgraph: too few nodes connected to", min0)  
        if len(used_nodes) == 0: 
            # use the biggest connected cluster
            cc = sorted(nx.connected_components(graph), key=len, reverse=True)
            used_nodes += cc[0]  # list is ordered by size of cluster

        if self.subgraph_size is not None:
            node_lists = nx.connected_components(graph)
            for nodes in node_lists:
                if len(nodes) >= self.subgraph_size:
                    used_nodes += nodes

        newgraph = graph.subgraph(used_nodes).copy()
        return newgraph
def connect_simple_graph(G):
    """check if simple graph G is disconnected and connect if necessary"""

    cc = list(nx.connected_components(G))  # cc returns the connected components of G as lists cc[0], cc[1], etc
    component_count = len(cc)
    while component_count > 1:  # while G is not connected, reduce number of components

        # pick a random node in the largest component cc[0] that has degree > 1
        node1 = rnd.choice(cc[0])
        while G.degree(node1) == 1:
            node1 = rnd.choice(cc[0])

            # pick a node in another component
        node2 = rnd.choice(cc[1])

        # pick neighbors of node1 and node2
        nbr1 = rnd.choice(G.neighbors(node1))
        nbr2 = rnd.choice(G.neighbors(node2))

        # swap connections between node1,nbr1 with connections between node2,nbr2
        #  to attempt to connect the two components
        G.remove_edges_from([(node1, nbr1), (node2, nbr2)])
        G.add_edges_from([(node1, node2), (nbr1, nbr2)])

        cc = list(nx.connected_components(G))
        component_count = len(cc)
def connect_module_graph(G, outdegree_list):
    """Connect disconnected modules. Note: This function cannot be used to 
    connect the entire modular graph."""
    cc_tot = list(nx.connected_components(G))  # cc returns the connected components of G as lists cc[0], cc[1], etc.
    isolated_comp, outedge_comp, isolated_comp_count, outedge_comp_count = partition_network_components(
        cc_tot, outdegree_list
    )

    while isolated_comp_count > 0:  # while G is not connected, reduce number of components
        # pick a random node in the largest component cc[0] that has degree > 1
        node1 = rnd.choice(isolated_comp[0])
        # pick a node in another component whose degree >1
        node2 = rnd.choice(outedge_comp[rnd.choice([x for x in xrange(outedge_comp_count)])])
        while G.degree(node2) <= 1:
            node2 = rnd.choice(outedge_comp[rnd.choice([x for x in xrange(outedge_comp_count)])])

        # pick neighbors of node1 and node2
        nbr1 = rnd.choice(G.neighbors(node1))
        nbr2 = rnd.choice(G.neighbors(node2))

        # swap connections between node1,nbr1 with connections between node2,nbr2
        #  to attempt to connect the two components
        G.remove_edges_from([(node1, nbr1), (node2, nbr2)])
        G.add_edges_from([(node1, node2), (nbr1, nbr2)])

        cc_tot = list(nx.connected_components(G))
        isolated_comp, outedge_comp, isolated_comp_count, outedge_comp_count = partition_network_components(
            cc_tot, outdegree_list
        )
Example #11
	def communities(self, nCommunities, weight=None):
		"""
		Compute communities.

		Parameters
		----------
		nCommunities - number of communities to be returned.
			This is added to simplify the process; the original GN algorithm doesn't 
			need a predetermined number of communities. 
			Other measures, such as a threshold on betweenness centrality, can be used instead.
		
		weight (string) - If None, all edge weights are considered equal. 
			Otherwise holds the name of the edge attribute used as weight. 


		Returns
		--------
		A list of communities where each community is a list of the nodes in the community.	 
		"""
		gr = self.g
		n = nx.number_connected_components(gr)
		components = nx.connected_components(gr)

		while (n < nCommunities):
			gr = self.communitySplits(gr, weight=weight)
			components = nx.connected_components(gr)
			n = nx.number_connected_components(gr)
			if gr.number_of_edges() == 0:
				break
		return components
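
communitySplits is defined elsewhere in the class; a simplified, self-contained stand-in for the same stopping rule (keep splitting by removing the highest-betweenness edge until at least nCommunities components exist) might look like this:

import networkx as nx

def split_into_communities(gr, nCommunities, weight=None):
    gr = gr.copy()
    while nx.number_connected_components(gr) < nCommunities and gr.number_of_edges() > 0:
        bw = nx.edge_betweenness_centrality(gr, weight=weight)
        gr.remove_edge(*max(bw, key=bw.get))     # one split step
    return [list(c) for c in nx.connected_components(gr)]

print(split_into_communities(nx.karate_club_graph(), 3))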
def select_clusters_for_length(selected_protein_pairs, genome_graph, genome_graph_query):
	# defines the shortest length of a synteny ortholog cluster
	pairs={}
	for n in selected_protein_pairs:	
		pairs[n[0]]=n[1]


	valid_graph=nx.Graph()
	edges=list()
	for edge in genome_graph.edges():


		if edge[0] in pairs.keys() and edge[1] in pairs.keys():
			valid_graph.add_edge(edge[0],edge[1])

	for cluster in nx.connected_components(valid_graph):
		if len(cluster)>4:
			query_nodes={}
			valid_query_graph=nx.Graph()
			for node in cluster:
				
				query_nodes[pairs[node]]=node


			for query_edge in genome_graph_query.edges():
				if query_edge[0] in query_nodes.keys() and query_edge[1] in query_nodes.keys():
					
					valid_query_graph.add_edge(query_edge[0],query_edge[1])
			
			for query_cluster in nx.connected_components(valid_query_graph):
				if len(query_cluster)>4:
					for query_node in sorted(query_cluster):
						print query_nodes[query_node]+"\t"+query_node
def eigenvector(g, recalculate=False):
    """
    Performs robustness analysis based on eigenvector centrality,  
    on the network g using sequential (recalculate = True) 
    or simultaneous (recalculate = False) approach. Returns a list 
    with fraction of nodes removed, a list with the corresponding sizes of 
    the largest component of the network, and the overall vulnerability 
    of the network.
    """

    m = networkx.eigenvector_centrality(g, max_iter=5000)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key=len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    r = 0.0
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:

            try:
                m = networkx.eigenvector_centrality(g, max_iter=5000)
            except networkx.NetworkXError:
                break

            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        largest_component = max(networkx.connected_components(g), key=len)
        x.append(i * 1. / n)
        r += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, r / n
Example #14
def find_communities_modularity(G, max_iter=None):
    '''
    INPUT:
        G: networkx Graph
        max_iter: (optional) if given, maximum number of iterations
    OUTPUT: list of lists of strings (node names)

    Run the Girvan-Newman algorithm on G and find the communities with the
    maximum modularity.
    '''
    degrees = G.degree()
    num_edges = G.number_of_edges()
    G1 = G.copy()
    best_modularity = -1.0
    best_comps = list(nx.connected_components(G1))
    i = 0
    while G1.number_of_edges() > 0:
        subgraphs = nx.connected_component_subgraphs(G1)
        modularity = get_modularity(subgraphs, degrees, num_edges)
        if modularity > best_modularity:
            best_modularity = modularity
            best_comps = list(nx.connected_components(G1))
        girvan_newman_step(G1)
        i += 1
        if max_iter and i >= max_iter:
            break
    return best_comps
Example #15
	def conn_comps(self, g2):
		# then try to do connectedness
		num_e = len(g2.edges())
		conn_comps = list(networkx.connected_components(g2))
		conn_trials = 0
		while len(conn_comps) > 1:
			small_comps = conn_comps[1:]
			small_comps.reverse()   # start with smallest components, easiest to fix (?)
			for comp in small_comps:
				edges = []
				for u in comp:
					i = self.state.get_partition_of_node(u)
					nodes_i = self.state.get_nodes(i)
					# sample by edges
					for v in nodes_i:
						if u != v and g2.degree(v) >= 2:
							edges += [ (u, v, w) for w in g2.neighbors(v) \
							if (w != u and not g2.has_edge(u,w)) ]

				if len(edges) == 0:
					continue
				(u, v, w) = random.choice(edges)
				g2.remove_edge(v,w)
				assert u != w
				assert not g2.has_edge(u,w)
				g2.add_edge(u,w)
				assert g2.has_edge(u,w)
				assert num_e == len(g2.edges()), "expected %d vs. actual %d: u=%s v=%s w=%s" % (num_e, len(g2.edges()), str(u), str(v), str(w))
			conn_comps = list(networkx.connected_components(g2))
			conn_trials += 1
			if conn_trials > 50:
				print "After %d tries, could not connect graph, leaving %d components" % (conn_trials, len(conn_comps))
				break
Example #16
def rand(infile):
    """
    Performs robustness analysis based on random attack, on the network 
    specified by infile. Returns a list with fraction of nodes removed, a 
    list with the corresponding sizes of the largest component of the 
    network, and the overall vulnerability of the network.
    """

    g = networkx.read_gml(infile)
    l = [(node, 0) for node in g.nodes()]
    random.shuffle(l)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key = len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    R = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        largest_component = max(networkx.connected_components(g), key = len)
        x.append(i * 1. / n)
        R += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, 0.5 - R / n
Example #17
	def conn_comps(self):
		self.components={}
		for n in nx.connected_components(self.G):
			self.components[len(n)]=self.components.get(len(n),0)+1
		self.n_comp=nx.number_connected_components(self.G)
		self.comp_mode=mode(self.components)
		self.comp_average=average(self.components)
 def pruneBranches(self, G): #Based on high-degree neighbors - use on a pruned MST, before merging chains
     nodesToMerge = set()
     for n in G:
         if G.degree(n) >= 3:
             for nbr in G[n]:
                 if G.degree(nbr) >= 3:
                     nodesToMerge.add(n)
                     nodesToMerge.add(nbr)
                     
     SG = G.subgraph(nodesToMerge)
     print "pruneBranches2 is going to merge the following node clusters:", nx.connected_components(SG)
     #We're going to merge each connected component in this subgraph into a single node
     for C in nx.connected_components(SG):
         if len(C) <= 1: #should always be false
             raise InputError('Single node in pruneBranches2 subgraph')
         else:
             maxDegree = 0
             maxNode = None
             for n in C:
                 if SG.degree(n) > maxDegree:
                     maxDegree = SG.degree(n)
                     maxNode = n
             C.remove(maxNode)
             for n in C:
                 self.mergeGraphNodes(G, maxNode, n)
Example #19
    def detectGroupsNX(self, nodes, links):
        '''
        detect groups using the NetworkX library. It uses a simple algorithm of
        remove "highest between_ness centrality nodes" and then detecting the graph
        split.
        '''
        def make_nx_graph(nodes, links):
            G = nx.Graph()
            G.add_nodes_from(nodes)
            link_tupples = [(node_tupple[0], node_tupple[1], val['count'])
                            for node_tupple, val in links.iteritems()]
            G.add_weighted_edges_from(link_tupples)
            return G

        def calc_betweenness(graph):
            centrality = nx.edge_betweenness_centrality(graph, False)
            centrality = sorted(
                centrality.iteritems(), key=operator.itemgetter(1), reverse=True)
            return centrality

        def calc_average(values):
            average = sum(values) * 1.0 / len(values)
            return average

        def centrality_stddev(centrality):
            # find the standard deviation of centrality
            count = len(centrality)
            average = 0.0
            std_dev = 0.0
            if count > 0:
                total = sum(
                    itertools.imap(operator.itemgetter(1), centrality)) * 1.0
                average = total / count
                variance_sum = sum(map(
                    lambda x: (x - average)**2, itertools.imap(operator.itemgetter(1), centrality)))
                std_dev = math.sqrt(variance_sum / count)
            return average, std_dev

        graph = make_nx_graph(nodes, links)
        groups = list(nx.connected_components(graph))

        print "number of groups detected %d" % len(groups)
        centrality = calc_betweenness(graph)
        average, censtddev = centrality_stddev(centrality)
        # remove all the edges with centrality > (average+stddev)
        centrality_maxval = average + (censtddev * 1.96)
        edges = [edge_info[0]
                 for edge_info in centrality if edge_info[1] >= centrality_maxval]
        # Store the information about suggested edges to remove
        self.edgesToRemove = edges
        graph.remove_edges_from(edges)
        print "edges removed %d" % len(edges)

        # now extract the groups (or connected components) from the graph.
        groups = nx.connected_components(graph)
        groups = sorted(groups, key=lambda g: len(g), reverse=True)
        print "number of groups detected %d" % len(groups)
        return groups
Example #20
def linker_graph(seeds, interactome):
    """Creates an expanded graph by iteratively connecting genes of
    interest into a largest connected component (LCC).

    An initial graph is created by finding the largest connected
    component of genes of interest only. Each unincluded GOI is
    included by finding any intermediate interactor to connect
    it to the LCC. That is, if there is path such that
    Unincluded GOI <-> Other Gene <-> Included GOI, the path is
    added to the network. This process is repeated until either
    all GOIs are included, or no more GOIs can be included.
    
    Parameters
    ----------
    seeds : An iterable of genes to generate the network from

    interactome : The consolidated interactome

    Returns
    -------
    An expanded NetworkX Graph
    """

    seeds = set(seeds) & set(interactome)

    nodes = set(max(
        nx.connected_components(
            interactome.subgraph(seeds)),
        key=len))

    orphaned = seeds - nodes

    while orphaned:
        # Find any common interactors for the orphans and the current graph
        orphan_edges = set()
        for orphan in orphaned:
            orphan_edges.update(interactome[orphan].keys())

        linkers = set()
        for node in nodes:
            linkers.update(set(interactome[node].keys()) & orphan_edges)


        # Give up if we can't find any new connections
        if not linkers:
            break

        # Update the graph to include the old graph, new linkers and any
        # newly included orphans
        nodes = set(max(
            nx.connected_components(
                interactome.subgraph(nodes | linkers | orphaned)),
            key=len))

        orphaned -= nodes

    return interactome.subgraph(nodes)
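
The core pattern in linker_graph is the largest connected component of the seed-induced subgraph; a toy illustration of that pattern and of the orphan set it leaves behind (hypothetical mini-interactome):

import networkx as nx

interactome = nx.Graph([("A", "B"), ("B", "C"), ("C", "D"),
                        ("D", "E"), ("E", "F"), ("X", "Y")])
seeds = {"A", "B", "C", "F"}
# largest connected component of the seed-induced subgraph
lcc = max(nx.connected_components(interactome.subgraph(seeds)), key=len)
print("seed LCC: %s" % sorted(lcc))                # ['A', 'B', 'C']
print("orphaned seeds: %s" % sorted(seeds - lcc))  # ['F']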
Example #21
 def get_connected_sink(self, mst, s1, u):
     s2 = None
     for parent, child in nx.bfs_edges(mst, s1):
         if u[child] == 0.:
             s2 = child
             break
     if s2 is None:
         print nx.connected_components(mst)
     assert s2 is not None
     return s2
Example #22
def approximation_k_components_dense(G, max_k=None):
    # Compute only until max k
    if max_k is None:
        max_k = float('infinity')
    # Dictionary with connectivity level (k) as keys and a list of
    # sets of nodes that form a k-component as values
    k_components = {}
    # Dictionary with nodes as keys and maximum k of the deepest 
    # k-component in which they are embedded
    k_number = dict(((n,0) for n in G.nodes()))
    # We deal first with k = 1
    k_components[1] = []
    for cc in networkx.connected_components(G):
        for node in cc:
            k_number[node] = 1
        if len(cc) > 2:
            k_components[1].append(set(cc))
    # Start from k_cores: all k-components are also k-cores
    # but not all k-cores are k-components
    core_number = networkx.core_number(G)
    for k in range(2, min(max(core_number.values())+1, max_k + 1)):
        k_components[k] = []
        # Build k-core subgraph
        C = G.subgraph((n for n, cnum in core_number.items() if cnum >= k))
        for candidates in networkx.connected_components(C):
            # Compute pairwise vertex connectivity for each connected part
            # of this k-core using White and Newman 2001 algorithm.
            K = all_pairs_vertex_connectivity(G.subgraph(candidates), 
                                                    max_paths=k,
                                                    strict=True)
            # Build a graph where two nodes are linked if they have at least k
            # node independent paths between them. Suggested in 
            # White & Newman, 2001 (This is a very dense graph, almost complete 
            # in many cases)
            H = networkx.Graph()
            # Too slow because we add every edge twice
            #H.add_edges_from(((u,v) for u in K \
            #                    for (v,w) in K[u].iteritems() if w >= k))
            seen = set()
            for u, nbrs in K.items():
                for v, ni_paths in nbrs.iteritems():
                    if v not in seen and ni_paths >= k:
                        H.add_edge(u,v)
                seen.add(u)
            # Compute k-core of H and assume that the core of level k is a good
            # approximation for a component of level k
            core_number_2 = networkx.core_number(H)
            C2 = H.subgraph((n for n, cnum in core_number_2.items() if cnum >= k))
            for k_component in networkx.connected_components(C2):
                if len(k_component) >= k:
                    k_components[k].append(set(k_component))
                    for node in k_component:
                        k_number[node] = k
    
    return k_components, k_number
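
Recent NetworkX releases ship a k-components approximation under networkx.algorithms.approximation; a minimal usage sketch for comparison (its results may differ from the function above):

import networkx as nx
from networkx.algorithms import approximation as approx

G = nx.petersen_graph()
k_components = approx.k_components(G)            # dict: k -> list of sets of nodes
for k, comps in sorted(k_components.items()):
    print("k=%d: %d component(s) of sizes %s" % (k, len(comps), [len(c) for c in comps]))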
Example #23
def get_client_family_ids(hmis, cp):
    """
    Given raw HMIS and Connecting Point dataframes, de-duplicate individuals and determine families across time.

    See the README for more information about rationale and methodology.

    The graph contains IDs from both HMIS and Connecting Point, so each vertex is represented as a tuple `(c, id)`,
    where `c` is either `'h'` or `'c'`, to indicate whether the `id` corresponds to a row in HMIS or Connecting Point.
    For example, `('h', 1234)` represents the row(s) in HMIS with individual ID `1234`, and `('c',5678)` represents the
    row(s) in Connecting Point with individual ID `5678`.

    :param hmis: HMIS dataframe.
    :type hmis: Pandas.Dataframe.

    :param cp: Connecting Point dataframe.
    :type cp: Pandas.Dataframe.
    """
    hmis = hmis.rename(columns={'Subject Unique Identifier': 'Raw Subject Unique Identifier'})
    cp = cp.rename(columns={'Clientid': 'Raw Clientid'})

    # create graph of individuals
    G_individuals = nx.Graph()
    G_individuals.add_nodes_from([('h', v) for v in hmis['Raw Subject Unique Identifier'].values])
    G_individuals.add_nodes_from([('c', v) for v in cp['Raw Clientid'].values])

    # add edges between same individuals
    G_individuals.add_edges_from(group_edges('h', pd.read_csv('../data/hmis/hmis_client_duplicates_link_plus.csv'), ['Set ID'], 'Subject Unique Identifier'))
    G_individuals.add_edges_from(group_edges('c', pd.read_csv('../data/connecting_point/cp_client_duplicates_link_plus.csv'), ['Set ID'], 'Clientid'))
    G_individuals.add_edges_from(matching_edges())

    # copy graph of individuals and add edges between individuals in the same family
    G_families = G_individuals.copy()
    G_families.add_edges_from(group_edges('h', hmis, ['Family Site Identifier','Program Start Date'], 'Raw Subject Unique Identifier'))
    G_families.add_edges_from(group_edges('c', cp, ['Caseid'], 'Raw Clientid'))

    # compute connected components and pull out ids for each dataframe for individuals and families
    hmis_individuals = [get_ids_from_nodes('h', c) for c in nx.connected_components(G_individuals)]
    cp_individuals = [get_ids_from_nodes('c', c) for c in nx.connected_components(G_individuals)]
    hmis_families = [get_ids_from_nodes('h', c) for c in nx.connected_components(G_families)]
    cp_families = [get_ids_from_nodes('c', c) for c in nx.connected_components(G_families)]

    # create dataframes to merge
    hmis_individuals = create_dataframe_from_grouped_ids(hmis_individuals, 'Subject Unique Identifier')
    hmis_families = create_dataframe_from_grouped_ids(hmis_families, 'Family Identifier')
    cp_individuals = create_dataframe_from_grouped_ids(cp_individuals, 'Clientid')
    cp_families = create_dataframe_from_grouped_ids(cp_families, 'Familyid')

    # merge into hmis and cp dataframes
    hmis = hmis.merge(hmis_individuals, left_on='Raw Subject Unique Identifier', right_index=True, how='left')
    hmis = hmis.merge(hmis_families, left_on='Raw Subject Unique Identifier', right_index=True, how='left')
    cp = cp.merge(cp_individuals, left_on='Raw Clientid', right_index=True, how='left')
    cp = cp.merge(cp_families, left_on='Raw Clientid', right_index=True, how='left')

    return (hmis, cp)
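
The resolution step above reduces to connected components over a graph whose nodes are (source, id) tuples; a minimal, self-contained sketch of that pattern with made-up IDs (the CSV inputs and helpers such as group_edges are project-specific and not reproduced):

import networkx as nx

G = nx.Graph()
G.add_nodes_from([("h", 1), ("h", 2), ("c", 10), ("c", 11)])
# an edge asserts "these two records refer to the same person"
G.add_edges_from([(("h", 1), ("c", 10)), (("h", 1), ("h", 2))])
for component in nx.connected_components(G):
    hmis_ids = sorted(i for src, i in component if src == "h")
    cp_ids = sorted(i for src, i in component if src == "c")
    print("person: hmis=%s cp=%s" % (hmis_ids, cp_ids))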
Example #24
def graphs_stats():
	print "Created directed graph, with: ", G.number_of_nodes(), "nodes; and: ", G.number_of_edges(), " edges."
	print "7 maximum degrees of nodes: ", sorted(nx.degree(G).values())[-7:]
	print "7 maximum indegrees of nodes: ", sorted(G.in_degree().values())[-7:]
	print "7 maximum outdegrees of nodes: ", sorted(G.out_degree().values())[-7:]
	print "Connected components: ", len(nx.connected_components(G.to_undirected()))
	i = 0
	print "7 maximum connected components: "
	for el in sorted(nx.connected_components(G.to_undirected()), key=lambda x: len(x), reverse=True):
		i+=1
		print len(el)
		if i==7: break
Example #25
    def RemoveBreaks(self):

        listSubgraphNodes = [list(c) for c in nx.connected_components(self.graph)]
        while len(listSubgraphNodes) > 1:
            NodesA = listSubgraphNodes[0]
            NodesB = listSubgraphNodes[1]
            del listSubgraphNodes[1]

            self.BridgeSubgraphs(self.graph, NodesA, NodesB)
            NodesA.extend(NodesB)

        assert nx.number_connected_components(self.graph) == 1
Example #26
def robustness(brain, iter_len=500, window_size=50):
    """
    A function to calculate robustness based on "Error and attack
    tolerance of complex networks" Albert et al. Nature 2000 406:378-382

    The function calculates the rate of change in the size of the largest
    connected component (S) as nodes are randomly removed. The process runs
    iteratively and takes a mean. The gradient of S is smoothed to provide
    a more accurate measure by a sliding window.

    Note, this function is relatively slow compared to other metrics due to
    the multiple iterations.

    Parameters
    ----------
    brain: maybrain.brain.Brain
        An instance of the `Brain` class
    iter_len: int
        number of iterations
    window_size: int
        size of the sliding window for smoothing the gradient

    """
    f_list = np.zeros(iter_len)
    for i in range(iter_len):
        a_max = max((brain.G.subgraph(c).copy() for c in nx.connected_components(brain.G)), key=len)
        n_list = [v for v in brain.G.nodes()]
        random.shuffle(n_list)
        n_list = n_list[:-1]
        mat = np.zeros(len(n_list))
        count = 0
        a_len = a_max.number_of_nodes()

        while n_list and a_len > 1:
            n = n_list.pop()
            if n in a_max.nodes():
                a_max.remove_node(n)
                if not nx.is_connected(a_max):  # only recalculate if the further fragmentation
                    a_max = max((a_max.subgraph(c).copy() for c in nx.connected_components(a_max)), key=len)
                    a_len = a_max.number_of_nodes()
                else:
                    a_len -= 1
            mat[count] = a_len
            count += 1

        grad = np.gradient(mat)
        run_mean = np.convolve(grad, np.ones((window_size,)) / window_size)[(window_size - 1):]
        diffs = np.diff(run_mean)
        nr_arg = np.argmin(diffs)

        f_list[i] = nr_arg
    return np.mean(f_list) / brain.G.number_of_nodes()
Example #27
def __group_to_color(segments, samples=None, snp_range=None):
    '''Given a -list- of key-value pairs ((start, stop), (v1,v2)), where the key denotes
    a segment [start,stop) of the infinite integer lattice on which v1 and v2 are equal,
    return a list of lists, each of which contains segments of the same color (IBD sharing).'''
    (sub_segments, intersections, value_to_segments, _, _) = \
    im.segment.form_intersections(segments, True, samples=samples, snp_range=snp_range)
    # Build a graph G where the nodes = segments and edges = (segments intersect AND their sample
    # sets intersect). G's connected components are groups, where group is a set of segments of the
    # same color.
    return nx.connected_components(nx.from_edgelist(it.chain.from_iterable(it.product(sharing_segments, sharing_segments)
              for sharing_segments in (util.union_all(*(value_to_segments[i][x] for x in component))
                                       for i in xrange(len(sub_segments))
                                       for component in nx.connected_components(nx.Graph(intersections[i]))))))
Example #28
def probeLengthScale(data, r_min, r_max):
    """invesitgates how qualitative behaviour (the number of connected components: CC = Connected Component, #CC = number of CC) of graph changes as a function of 
       interaction radius. Computes the number of CC for each r, denoted CC(r), in the interval (r_min, r_max),
       and if CC(r_i) != CC(r_min), stops and prints r_i. I developed this to test coarse graining. Best to use this on a subset of data (I took the first 500 from)."""
    
    g_coarse = makeGraph(data)
    connect(g_coarse, r_min)
    
    for i in xrange(r_min, r_max):
        g = makeGraph(data)
        connect(g, i)
        if nx.number_connected_components(g_coarse) != nx.number_connected_components(g):
            print 'Different # of connected components at r =  ' + str(i)
            break
Example #29
def disintegrate(gr):
    components = list(nx.connected_components(gr))
    num_comps = len(components)
    num_nodes = nx.number_of_nodes(gr)
    yield components
    while num_comps < num_nodes:
        bw = nx.edge_betweenness_centrality(gr)              # betweenness dict
        to_remove = max(bw.keys(), key=(lambda x: bw[x]))    # edge with highest betweenness
        gr.remove_edge(*to_remove)                           # throw it away
        components = list(nx.connected_components(gr))
        new_num_comps = len(components)
        if new_num_comps > num_comps:
            num_comps = new_num_comps
            yield components
Example #30
File: GTrie.py  Project: schmidtj/G-Trie
 def GTrieInsert(self, graph, label=None, states=False):
     if not self.root.isLeaf() and self.null_graph:
         self.insertRecursive(networkx.Graph(), [], networkx.adjacency_matrix(networkx.Graph()).todense(),
                              self.root, 0, label, states)
     components = list(networkx.connected_components(graph.to_undirected())
                       if networkx.is_directed(graph) else networkx.connected_components(graph))
     component_len = [1 for x in components if len(x) > 1]
     if len(components) > 1 and sum(component_len) > 1:
         print "Illegal Graph Insert: Graph has more than one connected component."
         return
     cannonGraph = self.GTCannon(graph.copy())
     matrix = networkx.adjacency_matrix(cannonGraph).todense()
     conditions = self.utility.symmetryConditions(cannonGraph)
     self.insertRecursive(cannonGraph, conditions, matrix, self.root, 0, label, states)
Example #31
def find_fragments(atoms, scale=1.0) -> list:
    """Finds unconnected structural fragments by constructing
    the first-neighbor topology matrix and the resulting graph
    of connected vertices.

    Args:
        atoms: :class:`~ase.atoms.Atoms` or :class:`~aimstools.structuretools.structure.Structure`.
        scale: Scaling factor for covalent radii.

    Note:
        Requires networkx library.

    Returns:
        list: NamedTuple with indices and atoms object.

    """

    atoms = atoms.copy()
    radii = scale * covalent_radii[atoms.get_atomic_numbers()]
    nl = neighborlist.NeighborList(radii, skin=0, self_interaction=False, bothways=True)
    nl.update(atoms)
    connectivity_matrix = nl.get_connectivity_matrix(sparse=False)

    con_tuples = {}  # connected first neighbors
    for row in range(connectivity_matrix.shape[0]):
        con_tuples[row] = []
        for col in range(connectivity_matrix.shape[1]):
            if connectivity_matrix[row, col] == 1:
                con_tuples[row].append(col)

    pairs = []  # cleaning up the first neighbors
    for index in con_tuples.keys():
        for value in con_tuples[index]:
            if index > value:
                pairs.append((index, value))
            elif index <= value:
                pairs.append((value, index))
    pairs = set(pairs)

    graph = nx.from_edgelist(pairs)  # converting to a graph
    con_tuples = list(
        nx.connected_components(graph)
    )  # graph theory can be pretty handy

    fragments = {}
    i = 0
    for tup in con_tuples:
        fragment = namedtuple("fragment", ["indices", "atoms"])
        ats = ase.Atoms()
        indices = []
        for entry in tup:
            ats.append(atoms[entry])
            indices.append(entry)
        ats.cell = atoms.cell
        ats.pbc = atoms.pbc
        fragments[i] = fragment(indices, ats)
        i += 1
    fragments = [
        v
        for k, v in sorted(
            fragments.items(),
            key=lambda item: np.average(item[1][1].get_positions()[:, 2]),
        )
    ]
    return fragments
Example #32
File: genes.py  Project: bdgp/grit
def cluster_intron_connected_segments(segments, introns):
    if len(segments) == 0:
        return []
    segments = sorted(segments)
    segment_starts = numpy.array([x[0] for x in segments])
    segment_stops = numpy.array([x[1] for x in segments])

    edges = set()
    for start, stop in introns:
        # Skip junctions that dont fall into any segment
        if start - 1 < segment_starts[0]: continue
        if stop + 1 >= segment_stops[-1]: continue

        # find which bin the segments fall into. Note, that since the
        # segments don't necessarily tile the genome, it's possible
        # for the returned bin to not actually contain the junction
        start_bin = segment_starts.searchsorted(start - 1, side='right') - 1
        assert start_bin >= 0
        stop_bin = segment_starts.searchsorted(stop + 1, side='right') - 1

        # since the read coverage is determined in part determined by
        # the junctions, we should never see a junction that doesn't fall
        # into a segment
        try:
            assert (segment_starts[start_bin] <= start - 1 <=
                    segment_stops[start_bin]), str([
                        segment_starts[start_bin], start - 1,
                        segment_stops[start_bin],
                        segment_starts[start_bin + 1], start - 1,
                        segment_stops[start_bin + 1]
                    ])

            assert (segment_starts[stop_bin] <= stop + 1 <=
                    segment_stops[stop_bin]), str([
                        segment_starts[stop_bin], stop - 1,
                        segment_stops[stop_bin], segment_starts[stop_bin + 1],
                        stop - 1, segment_stops[stop_bin + 1]
                    ])
        except:
            raise
            continue
        #if start > segment_stops[start_bin]: continue
        #if stop > segment_stops[stop_bin]: continue
        # XXX - don't remember why I was doing this
        #assert stop_bin < len(segment_starts)-1, \
        #    str([stop_bin, len(segment_stops), segment_stops[stop_bin]])
        if start_bin != stop_bin:
            edges.add((int(min(start_bin,
                               stop_bin)), int(max(start_bin, stop_bin))))

    genes_graph = nx.Graph()
    genes_graph.add_nodes_from(xrange(len(segment_starts)))
    genes_graph.add_edges_from(edges)

    segments = []
    for g in nx.connected_components(genes_graph):
        g = sorted(g)
        segment = []
        prev_i = g[0]
        segment.append([
            segment_starts[prev_i],
        ])
        for i in g[1:]:
            # if we've skipped at least one node, then add
            # a new region onto this segment
            if i > prev_i + 1:
                segment[-1].append(segment_stops[prev_i])
                segment.append([
                    segment_starts[i],
                ])
                prev_i = i
            # otherwise, we've progressed to an adjacent segment
            # so just merge the adjacent intervals
            else:
                assert i == prev_i + 1
                prev_i += 1
        segment[-1].append(segment_stops[g[-1]])

        segments.append(segment)

    return segments
Example #33
            yield (n, set(self.adj) - set(self.adj[n]) - {n})


# Build several pairs of graphs, a regular graph
# and the AntiGraph of it's complement, which behaves
# as if it were the original graph.
Gnp = nx.gnp_random_graph(20, 0.8, seed=42)
Anp = AntiGraph(nx.complement(Gnp))
Gd = nx.davis_southern_women_graph()
Ad = AntiGraph(nx.complement(Gd))
Gk = nx.karate_club_graph()
Ak = AntiGraph(nx.complement(Gk))
pairs = [(Gnp, Anp), (Gd, Ad), (Gk, Ak)]
# test connected components
for G, A in pairs:
    gc = [set(c) for c in nx.connected_components(G)]
    ac = [set(c) for c in nx.connected_components(A)]
    for comp in ac:
        assert comp in gc
# test biconnected components
for G, A in pairs:
    gc = [set(c) for c in nx.biconnected_components(G)]
    ac = [set(c) for c in nx.biconnected_components(A)]
    for comp in ac:
        assert comp in gc
# test degree
for G, A in pairs:
    node = list(G.nodes())[0]
    nodes = list(G.nodes())[1:4]
    assert G.degree(node) == A.degree(node)
    assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree())
def l_Connection_strength(G):
    l_Connection_strength_Dic = {}
    node_set = G.nodes()
    Connection_num = 0

    # number of l-order connected subgraphs

    #print nid,i_2_nei
    for nid in node_set:

        degree = G.degree(nid)
        Neighbor_Set = G.neighbors(nid)
        #print nid,Neighbor_Set
        #print len(Neighbor_Set)

        # i__nei=set(G.neighbors(i))

        ###current_1_neighbor=G.neighbors(nid)
        #print nid,current_1_neighbor
        ###current_2_neighbor=[]
        ###for nnid in current_1_neighbor:
        ###current_2_neighbor = list(set(current_2_neighbor).union(set(G.neighbors(nnid))))
        #print '2_hop:', nid,current_2_neighbor
        ###current_2_neighbor= list(  set(current_2_neighbor).difference( set(current_1_neighbor).union(set([nid]))  ) )
        #print nid ,current_2_neighbor
        #print nid,Neighbor_Set

        if len(Neighbor_Set) == 1:
            Connection_num = 1
            #print nid
            l_Connection_strength_Dic[nid] = 1.0
            #print nid,l_Connection_strength_Dic[nid]

        elif len(Neighbor_Set) > 1:
            G_conn = nx.Graph()
            #print nid, Neighbor_Set
            ## combinations of (vi, vj)
            Cluster_head_connection_set = []
            for i in range(0, len(Neighbor_Set)):
                # vi is a neighbor of the target node
                vi = Neighbor_Set[i]
                #print nid,Neighbor_Set[i]
                n_vi_2 = []
                ## n_vi is a neighbor of vi
                for n_vi in G.neighbors(vi):
                    n_vi_2 = list(set(n_vi_2).union(set(G.neighbors(n_vi))))
                n_vi_2 = list(
                    set(n_vi_2).difference(
                        set(G.neighbors(vi)).union(set([nid]))))
                for j in range(i + 1, len(Neighbor_Set)):
                    vj = Neighbor_Set[j]
                    #print vi,vj
                    fai_ij = list(
                        set(n_vi_2).intersection(set(G.neighbors(vj))))
                    #print vi,vj,fai_ij
                    if fai_ij:
                        Cluster_head_connection_set.append(list([vi, vj]))
                        #
            #print nid,Cluster_head_connection_set
            for k in Cluster_head_connection_set:
                G_conn.add_edge(k[0], k[1])
            H = len(list(nx.connected_components(G_conn)))
            #print nid,H
            G_conn_nodenums = int(nx.number_of_nodes(G_conn))
            ## number of independent clusters
            independent_cluster_num = int(
                len(Neighbor_Set)) - int(G_conn_nodenums)
            ## l-order connectivity count
            Connection_num = int(H) + int(independent_cluster_num)
            l_Connection_strength_Dic[nid] = round(
                float(Connection_num) / float(len(Neighbor_Set)), 3)
            #print nid,l_Connection_strength_Dic[nid]
    return l_Connection_strength_Dic
Example #35
def predict(data_path, predict_path, model_name, show=False):
    # Read the parameters of the trained model
    net, n_features, n_classes = load_model_txt(model_name)

    # Load the trained model
    trained_net, config = models.get_model_and_config(net)
    model = trained_net(n_features,
                        n_classes,
                        *config['extra_args'])
    model_path = 'trained_models/' + model_name + '/model.pth'
    model.load_state_dict(torch.load(model_path))
    print(model)

    # Get the list of files for prediction
    pred_files = [os.path.join(data_path, line.rstrip()) for line in open(os.path.join(data_path, predict_path))]
    for file in pred_files[4:]:
        path, file_name = os.path.split(file)
        print(file)
        bbox_path = path.replace('anno','pred_bboxes')
        if not os.path.exists(bbox_path):
            os.makedirs(bbox_path)

        # Convert the gpickle file to a dgl graph
        dgl_g = graph_utils.convert_gpickle_to_dgl_graph(file)
        # Get the features from the given graph
        nxg = nx.read_gpickle(file)
        features = graph_utils.chris_get_features(nxg)

        model.eval()
        with torch.no_grad():
            logits = model(dgl_g, features)
            _, predictions = torch.max(logits, dim=1)
            predictions = predictions.numpy()

        # Get positions
        nxg = nx.read_gpickle(file)
        positions = nx.get_node_attributes(nxg, 'pos')
        positions = list(positions.values())

        if show:
            # Plot graph
            '''fig2 = plt.figure(dpi=150)
            fig2.clf()
            ax = fig2.subplots()
            inst_predictions = [0] * nxg.number_of_nodes()
            draw(inst_predictions, ax, nxg, positions)'''

            # Plot graph with predictions
            fig1 = plt.figure(dpi=150)
            fig1.clf()
            ax = fig1.subplots()
            draw(predictions, ax, nxg, positions)

        # Get labels
        labels = nx.get_node_attributes(nxg, 'label')
        labels = np.array(list(labels.values()))

        # Plot annotated graph
        '''fig2 = plt.figure(dpi=150)
        fig2.clf()
        ax = fig2.subplots()
        draw(labels, ax, nxg, positions)'''

        # Perform graph morphology closing
        predictions_alt = predictions
        # predictions_alt = post_processing(nxg, predictions)

        # Extract door nodes
        sub_nxg = instancing(nxg, predictions_alt)

        # Separate disjoint graphs (instancing)
        disjoint_sub_graphs = []
        for c in nx.connected_components(sub_nxg):
            disjoint_sub_graphs.append(sub_nxg.subgraph(c))

        clustered_disjoint_sub_graphs = []
        for graph in disjoint_sub_graphs:
            sub_positions = nx.get_node_attributes(graph, 'pos')
            sub_positions = np.array(list(sub_positions.values()))
            clustering = DBSCAN(eps=1100, min_samples=1).fit(sub_positions)
            cluster_labels = clustering.labels_
            graph_keys = list(graph._NODE_OK.nodes)
            for cluster_label in list(set(cluster_labels)):
                indices = []
                for idx, label in enumerate(cluster_labels):
                    if label == cluster_label:
                        indices.append(graph_keys[idx])
                sub_graph = graph.subgraph(indices)
                clustered_disjoint_sub_graphs.append(sub_graph)

        # Remove graphs not meeting conditions
        min_nr_nodes = 8
        selected_graphs = []
        area_list = []
        width_list = []
        height_list = []
        ratio_list = []

        for disjoint_sub_graph in clustered_disjoint_sub_graphs:
            if disjoint_sub_graph.number_of_nodes() > min_nr_nodes:
                selected_graphs.append(disjoint_sub_graph)
                tmp_positions = nx.get_node_attributes(disjoint_sub_graph, 'pos')
                tmp_positions = np.array(list(tmp_positions.values()))
                area, width, height, ratio = bounding_box_params(tmp_positions)
                area_list.append(area)
                width_list.append(width)
                height_list.append(height)
                ratio_list.append(ratio)

        seleted_graphs_joined = nx.Graph()

        for idx, graph in enumerate(selected_graphs):
            nx.set_node_attributes(graph, [], 'instance')
            for node in graph.nodes:
                graph.nodes[node]['instance'] = idx
            seleted_graphs_joined = nx.compose(seleted_graphs_joined, graph)

        inliers = reject_outliers_hardcoded(area_list, width_list, height_list, ratio_list)
        selected_graphs = [selected_graphs[i] for i in inliers]

        print('Number of doors: %d' % len(selected_graphs))

        seleted_graphs_joined = nx.Graph()

        for idx, graph in enumerate(selected_graphs):
            nx.set_node_attributes(graph, [], 'instance')
            for node in graph.nodes:
                graph.nodes[node]['instance'] = idx
            seleted_graphs_joined = nx.compose(seleted_graphs_joined, graph)

        # Determine bbox
        list_bboxes, list_gen_bboxes = determine_bboxes(selected_graphs)

        x_positions, y_positions = zip(*positions)
        x_min = min(x_positions)
        x_max = max(x_positions)
        y_min = min(y_positions)
        y_max = max(y_positions)

        norm_bboxes = normalize_bboxes(list_bboxes, x_min, x_max, y_min, y_max)
        norm_bboxes_inv_y = [[box[0], 1-box[1], box[2], 1-box[3]] for box in norm_bboxes]
        #print(norm_bboxes_inv_y)
        '''
        fig_norm_boxes = plt.figure(dpi=150)
        fig_norm_boxes.clf()
        ax = fig_norm_boxes.subplots()
        for box in norm_bboxes_inv_y:
            rect = patches.Rectangle((box[2], box[1]), box[0]-box[2], box[3]-box[1], linewidth=1, edgecolor='r', facecolor='none')
            ax.add_patch(rect)
        '''
        bboxes_filename = file_name.replace('_w_annotations.gpickle', '_gnn_boxes.txt')
        with open(os.path.join(bbox_path, bboxes_filename), 'w') as f:
            for item in norm_bboxes:
                f.write("%s\n" % ' '.join(str(v) for v in item))

        bboxes_image_filename = file_name.replace('_w_annotations.gpickle', '_gnn_boxes_image_format.txt')
        with open(os.path.join(bbox_path, bboxes_image_filename), 'w') as f:
            for item in norm_bboxes_inv_y:
                f.write("%s\n" % ' '.join(str(v) for v in item))

        if show:

            # Plot graph with generalized doors
            pos = nx.get_node_attributes(nxg, 'pos')
            fig5 = plt.figure(dpi=150)
            fig5.suptitle('graph with generalized doors', fontsize=12)
            fig5.clf()
            ax = fig5.subplots()
            nx.draw(seleted_graphs_joined, pos, with_labels=False, node_size=10, ax=ax, node_color='b')

            nxg_copy = nxg.copy()

            bbox_and_org_graph = nx.Graph()
            bbox_and_org_graph = nx.compose(bbox_and_org_graph, nxg_copy)
            bbox_graph = nx.Graph()
            for g_idx, g in enumerate(list_gen_bboxes):
                gen_pos = nx.get_node_attributes(g, 'pos')
                nx.draw(g, gen_pos, with_labels=False, node_color='g', node_size=30, ax=ax)
                nx.draw_networkx_edges(g, gen_pos, width=2, alpha=0.8, edge_color='g')
                g = nx.convert_node_labels_to_integers(g, first_label=4 * g_idx)
                bbox_graph = nx.compose(bbox_graph, g)
                bbox_graph = nx.convert_node_labels_to_integers(bbox_graph,
                                                            first_label=bbox_and_org_graph.number_of_nodes())
            bbox_and_org_graph = nx.compose(bbox_and_org_graph, bbox_graph)

            # Save res graph
            base = os.path.basename(file)
            file_name = os.path.splitext(base)[0]
            # nx.write_gpickle(door_graph, 'C:/Users/Chrips/Aalborg Universitet/Frederik Myrup Thiesson - data/door_graphs/' + file_name + '_door_graph.gpickle')
            # nx.write_gpickle(door_generalized_graph, 'C:/Users/Chrips/Aalborg Universitet/Frederik Myrup Thiesson - data/door_generalized_graphs/' + file_name + '_door_generalized_graph.gpickle')

            fig6 = plt.figure(dpi=150)
            fig6.suptitle('bbox and graph', fontsize=12)
            fig6.clf()
            ax = fig6.subplots()
            bbox_and_org_graph_pos = nx.get_node_attributes(bbox_and_org_graph, 'pos')
            nx.draw(bbox_and_org_graph, bbox_and_org_graph_pos, with_labels=False, node_size=10, ax=ax, node_color='r')

            fig7 = plt.figure(dpi=150)
            fig7.clf()
            ax = fig7.subplots()
            bbox_graph_pos = nx.get_node_attributes(bbox_graph, 'pos')
            nx.draw(bbox_graph, bbox_graph_pos, with_labels=False, node_size=10, ax=ax,
                    node_color='g')

            # Plot graph with instances
            fig4 = plt.figure(dpi=150)
            fig4.suptitle('instances and graph', fontsize=12)
            fig4.clf()
            ax = fig4.subplots()
            # ax.axis('equal')
            draw_inst(seleted_graphs_joined, ax, positions)

            plt.show()
Example #36
    # Adding nodes to the graph with their attributes
    G.add_node(tempInt, pos=temp)
    tempInt = tempInt + 1

G = Graph_EdgesConstruction(centroids, G, 130.0)

Graph = nx.to_numpy_matrix(G)
print("Graph: ")
print(Graph)

# Number of connected components
number_objects = nx.number_connected_components(G)
print("Number of objects found in the image: ", number_objects)

# Find connected components
objects = sorted(nx.connected_components(G), key=len, reverse=True)

t = 0
centers = []

# New graph for objects
O = nx.Graph()
tmp = 0
for obj in objects:
    tempSum = [0, 0]
    t = 0
    for component in obj:
        t = t + 1
        add(tempSum, G.node[component]['pos'])

    centers.append(divide(tempSum, t))
def fast_greedy_from_seperate_to_whole(a_graph):
    start_time = time.time()
    adjacency_dict = get_adjacency_dict(a_graph)
    for key in adjacency_dict:
        for sub_key in adjacency_dict[key]:
            if key == sub_key:
                print(key, sub_key)
    degree_dict = get_degree_dict(adjacency_dict)
    new_g = nx.Graph()
    new_g.add_nodes_from(list(a_graph.nodes))
    # print('new_g', tuple(nx.connected_components(new_g)))
    # print('new_g_edges', new_g.edges)
    left_edges = list(a_graph.edges)
    count = 0
    tmp_file = open('fast_greedy_community_detection_n20_m100_with_new_modularity_with_partial_iter_fast_edge_drop.txt', 'w')
    while len(left_edges) > 0:
        added_edge = left_edges[0]
        max_modularity_community = tuple(nx.connected_components(new_g))
        max_new_community_modularity = new_community_and_new_modularity(a_graph, new_g, added_edge, adjacency_dict, degree_dict)[1]
        if count == 0:
            last_iter_max_modularity = max_new_community_modularity
        for edge in left_edges:
            tmp_community, tmp_new_modularity = new_community_and_new_modularity(a_graph, new_g, edge, adjacency_dict, degree_dict)
            if tmp_new_modularity > max_new_community_modularity:
                max_new_community_modularity = tmp_new_modularity
                added_edge = edge
                max_modularity_community = tmp_community
        current_iter_max_modularity = max_new_community_modularity
        # print('added_edge11111111:', added_edge)
        # print('community_modularity1111111111111:', max_new_community_modularity)
        # print('community11111111111111111', max_modularity_community)
        new_g.add_edge(*added_edge)
        left_edges.remove(added_edge)
        # edge-adding update mechanism: triggered when modularity stops improving
        if current_iter_max_modularity == last_iter_max_modularity:
            print('modularity unchanged; batch-adding all intra-community edges')
            add_edges = []
            for c in sorted(nx.connected_components(new_g), key=len, reverse=True):
                if len(c) > 1:
                    sub_graph_nodes = list(c)
                    partial_sub_graph = a_graph.subgraph(sub_graph_nodes)
                    add_edges += list(partial_sub_graph.edges)
                    print(c)
            new_g.add_edges_from(add_edges)
            left_edges = list(set(left_edges) - set(new_g.edges))

            ss1 = copy.deepcopy(set(add_edges))
            for i in set(add_edges):
                ss1.add((i[1], i[0]))
            ss2 = copy.deepcopy(set(new_g.edges))
            for j in set(new_g.edges):
                ss2.add((j[1], j[0]))

            ss = list(ss1 - ss2)
            print('ss', ss)
            print('add_edges:', add_edges)
            print('new_edges:', new_g.edges)
        # end of the edge-adding update mechanism
        print('count:', count)
        print(left_edges)
        last_iter_max_modularity = current_iter_max_modularity

        tmp_file.write(str(max_new_community_modularity))
        tmp_file.write('\t')
        tmp_file.write(str(max_modularity_community))
        tmp_file.write('\n')

        count += 1
        if count % 10 == 1:
            print('just after:', count)
        # print('added_new_g_edges00000000000000', new_g.edges)
        # print('added_new_g000000000000', tuple(nx.connected_components(new_g)))
        # print('left_edges0000000000000', left_edges)

    end_time = time.time()
    tmp_file.write(str(end_time - start_time))
    tmp_file.close()
    print('time_spent:', end_time - start_time)
    pass
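
# --- Hedged usage sketch: fast_greedy_from_seperate_to_whole relies on helper functions
# defined elsewhere in this project (get_adjacency_dict, get_degree_dict,
# new_community_and_new_modularity) plus `networkx`, `time` and `copy` imports. Assuming
# those are available, a run over a small random graph (matching the n=20, m=100 case in
# the output file name) might look like this; the seed is illustrative only.
import networkx as nx

g = nx.gnm_random_graph(20, 100, seed=42)
fast_greedy_from_seperate_to_whole(g)
# Each iteration appends "<best modularity>\t<community partition>" to the text file named
# inside the function, and the total runtime is written as the final line.
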
def get_max_sub_component(a_graph):
    largest_components = max(nx.connected_components(a_graph), key=len)
    tmp_largest_sub = a_graph.subgraph(largest_components)
    return tmp_largest_sub
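
# --- Hedged usage sketch for get_max_sub_component. Note that Graph.subgraph() returns a
# view tied to the original graph, so call .copy() on the result if it needs to be edited.
import networkx as nx

g = nx.Graph([(1, 2), (2, 3), (4, 5)])   # two components: {1, 2, 3} and {4, 5}
lcc = get_max_sub_component(g)
print(sorted(lcc.nodes()))               # [1, 2, 3]
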
def get_graph_properties(edges):
    # Set up graph
    connections = np.array([int(x) for x in edges.split(';')])

    nodes = sorted(list(set(connections)))
    # Calculate Properties
    properties = []
    timings = {}

    if connections[0] > 0:
        edges = connections.reshape(int(connections.size / 2), 2)
        timeS = time.time()

        # directed graph
        G = nx.DiGraph()
        G.add_edges_from(edges)

        # undirected graph
        U = nx.Graph()
        U.add_edges_from(edges)
        # graph generated

        # property 1: number of components
        num_comp = nx.number_connected_components(U)
        properties.append(num_comp)

        # property 2: number of strongly connected components
        num_strong_comp = nx.number_strongly_connected_components(G)
        properties.append(num_strong_comp)

        # property 3: average in/out degree
        indeg = []
        outdeg = []
        indeg_ls = list(G.in_degree())
        outdeg_ls = list(G.out_degree())

        for x in np.arange(len(nodes)):
            indeg.append(indeg_ls[x][1])
            outdeg.append(outdeg_ls[x][1])
        av_deg = np.mean(indeg)
        properties.append(av_deg)

        # property 4: link density
        linkden = connections.size / (len(nodes) * len(nodes))
        properties.append(linkden)

        # property 5: number of self loops
        numloop = list(G.selfloop_edges())
        numloop = len(numloop)
        properties.append(numloop)
        #       # property 6: number of simple cycles (excluding self loops)
        #       numcyc = list(nx.simple_cycles(G))
        #       numcyc = len(numcyc) - numloop
        #       properties.append(numcyc)

        #       timings.update({'p6':time.time()-timeS})
        #       print('p6')
        #       print(timings['p6'])
        #       timeS = time.time()

        # find all components
        components = list(nx.connected_components(U))

        ischain = [None] * len(components)
        istree = [None] * len(components)
        isdag = [None] * len(components)
        unicel = [None] * len(components)
        isscc = [None] * len(components)
        iscyc = [None] * len(components)
        iseul = [None] * len(components)
        indeg_by_comp = []
        outdeg_by_comp = []
        node_conn = [0] * len(components)
        av_clust = [0.] * len(components)
        assort = [0.] * len(components)
        indeg_cen_av = [0.] * len(components)
        indeg_cen_max = [0.] * len(components)
        indeg_cen_min = [0.] * len(components)
        outdeg_cen_av = [0.] * len(components)
        outdeg_cen_max = [0.] * len(components)
        outdeg_cen_min = [0.] * len(components)
        bet_cen_av = [0.] * len(components)
        bet_cen_max = [0.] * len(components)
        bet_cen_min = [0.] * len(components)
        eig_cen_av = [0.] * len(components)
        eig_cen_max = [0.] * len(components)
        eig_cen_min = [0.] * len(components)
        triangles_av = [0.] * len(components)
        triangles_max = [0.] * len(components)
        triangles_min = [0.] * len(components)
        squares_av = [0.] * len(components)
        squares_max = [0.] * len(components)
        squares_min = [0.] * len(components)
        transitivity = [0.] * len(components)
        rc = [0.] * len(components)
        loopnumber = [0] * len(components)

        for compnum in np.arange(len(components)):
            # property 6: ischain?(remove self-loops and then test this property)
            # want: how many chains does the graph contain.. look at each component, not the whole graph in one go.
            # most graphs are single components.
            G1 = G.subgraph(list(components[compnum]))
            Gnoself = G1.copy()
            Gnoself.remove_edges_from(Gnoself.selfloop_edges())
            Unoself = nx.Graph()
            Unoself.add_edges_from(Gnoself.edges)

            # if all in and out degrees are 1, graph is a chain..do not include in trees
            indeg2 = []
            outdeg2 = []
            indeg_ls2 = list(Gnoself.in_degree())
            outdeg_ls2 = list(Gnoself.out_degree())
            # nx gives indeg and outdeg as tuples (nodename, in/out deg). which is why i need the for loop below
            for x in np.arange(len(G1.nodes())):
                indeg2.append(indeg_ls2[x][1])
                outdeg2.append(outdeg_ls2[x][1])
            indeg_by_comp.append(int_arr_to_str(indeg2, delim=';'))
            outdeg_by_comp.append(int_arr_to_str(outdeg2, delim=';'))

            indeg2 = np.array(indeg2)
            outdeg2 = np.array(outdeg2)
            in_min_out = indeg2 - outdeg2
            ischain[compnum] = int((np.sum(in_min_out) == 0)
                                   & (np.sum(np.abs(in_min_out)) == 2)
                                   & (np.all(indeg2 <= 1))
                                   & (np.all(outdeg2 <= 1)))
            # property 7: istree(remove chains first)
            istree[compnum] = int((nx.is_tree(Gnoself) - ischain[compnum]) > 0)
            # property 8: isdag(only looking at DAGs other than trees and chains)
            isdag[compnum] = int((int(nx.is_directed_acyclic_graph(Gnoself)) -
                                  istree[compnum] - ischain[compnum]) > 0)
            if isdag[compnum] > 0:
                loopnumber[compnum] = len(list(
                    Gnoself.edges)) - (len(list(Gnoself.nodes)) - 1)
            # property 9: single celled
            unicel[compnum] = int(len(Gnoself.nodes) == 1)
            istree[compnum] = int(istree[compnum]) - int(
                unicel[compnum]
            )  # nx counts single node with no self-edge as a tree
            # property 10: isscc (excluding unicellular)
            num_strong_comp2 = nx.number_strongly_connected_components(Gnoself)
            isscc[compnum] = int(num_strong_comp2 == 1)
            isscc[compnum] = int((isscc[compnum] - unicel[compnum]) > 0)
            # property 11: iscyc(cyclic graphs other than those with a single scc and single celled graphs)
            iscyc[compnum] = int((isdag[compnum] + istree[compnum] +
                                  ischain[compnum] + isscc[compnum] +
                                  unicel[compnum]) == 0)
            # property 12: is eulerian
            iseul[compnum] = int(nx.is_eulerian(Gnoself))
            # property 13: node connectivity
            node_conn[compnum] = approx.node_connectivity(Gnoself)
            # property 14: clustering coefficient
            av_clust[compnum] = nx.average_clustering(Gnoself)
            # property 15: assortativity(pearson's coefficient)
            try:
                assort[compnum] = nx.degree_pearson_correlation_coefficient(
                    Gnoself)  #####################check
            except:
                assort[compnum] = 0.0
            # property 16,17,18: in degree centrality (average, maximum and minimum)
            indeg_cen = []
            dict1 = nx.in_degree_centrality(Gnoself)
            for a1 in dict1:
                indeg_cen.append(dict1[a1])
            indeg_cen_av[compnum] = np.average(indeg_cen)
            indeg_cen_max[compnum] = max(indeg_cen)
            indeg_cen_min[compnum] = min(indeg_cen)
            # property 19,20,21: out degree centrality (average, maximum, minimum)
            outdeg_cen = []
            dict1 = nx.out_degree_centrality(Gnoself)
            for a1 in dict1:
                outdeg_cen.append(dict1[a1])
            outdeg_cen_av[compnum] = np.average(outdeg_cen)
            outdeg_cen_max[compnum] = max(outdeg_cen)
            outdeg_cen_min[compnum] = min(outdeg_cen)
            # property 22,23,24: betweenness centrality (average,maximum, minimum)
            bet_cen = []
            dict1 = nx.betweenness_centrality(Gnoself)
            for a1 in dict1:
                bet_cen.append(dict1[a1])
            bet_cen_av[compnum] = np.average(bet_cen)
            bet_cen_max[compnum] = max(bet_cen)
            bet_cen_min[compnum] = min(bet_cen)
            # property 25,26,27: eigen vector centrality (average,maximum, minimum)
            eig_cen = []
            try:
                dict1 = nx.eigenvector_centrality(Gnoself)
                for a1 in dict1:
                    eig_cen.append(dict1[a1])
                eig_cen_av[compnum] = np.average(eig_cen)
                eig_cen_max[compnum] = max(eig_cen)
                eig_cen_min[compnum] = min(eig_cen)
            except nx.PowerIterationFailedConvergence:
                pass
            # property 28,29,30: number of triangles for each node (average,maximum, minimum)
            triangles = []
            dict1 = nx.triangles(Unoself)
            for a1 in dict1:
                triangles.append(dict1[a1])
            if len(triangles):
                triangles_av[compnum] = np.average(triangles)
                triangles_max[compnum] = max(triangles)
                triangles_min[compnum] = min(triangles)
            # property 31: transitivity (fraction of all possible triangles present in the graph)
            transitivity[compnum] = nx.transitivity(Gnoself)
            # property 32,33,34: square clustering for each node(fraction of all possible squares present at a node)
            squares = []
            dict1 = nx.square_clustering(Gnoself)
            for a1 in dict1:
                squares.append(dict1[a1])
            if len(squares):
                squares_av[compnum] = np.average(squares)
                squares_max[compnum] = max(squares)
                squares_min[compnum] = min(squares)
            # propery 35: rich club coefficient
            if len(list(Unoself.nodes())) > 3:
                rc[compnum] = 0.0


#               rc[compnum] = nx.rich_club_coefficient(Unoself).values()# only works if graph has 4 or more edges
# property 36 and 37: number of source and target nodes

        iseul = sum(iseul)
        iscyc = sum(iscyc)
        isscc = sum(isscc)
        unicel = sum(unicel)
        isdag = sum(isdag)
        istree = sum(istree)
        ischain = sum(ischain)
        indeg_by_comp = ';'.join([str(x) for x in indeg_by_comp])
        outdeg_by_comp = ';'.join([str(x) for x in outdeg_by_comp])
        node_conn = ';'.join([str(x) for x in node_conn
                              ])  # node connectivity for each component
        avav_clust = np.average(
            av_clust)  # average clustering coefficient over all components
        av_clust = ';'.join([
            str(round(x, 2)) for x in av_clust
        ])  # average clustering coefficients for each component
        av_assort = np.average(
            assort)  # average assortativity over all components
        assort = ';'.join([str(round(x, 2)) for x in assort
                           ])  # assortativity for each component
        indeg_cen_avav = np.average(
            indeg_cen_av)  # average indeg centrality over all components
        indeg_cen_av = ';'.join([
            str(round(x, 2)) for x in indeg_cen_av
        ])  # average indeg centrality for each component
        indeg_cen_maxmax = max(
            indeg_cen_max)  # maximum indeg centrality across all components
        indeg_cen_max = ';'.join([
            str(round(x, 2)) for x in indeg_cen_max
        ])  # maximum indeg centrality for each component
        indeg_cen_minmin = min(
            indeg_cen_min)  # minimum indeg centrality across all components
        indeg_cen_min = ';'.join([
            str(round(x, 2)) for x in indeg_cen_min
        ])  # minimum indeg centrality for each component

        outdeg_cen_avav = np.average(outdeg_cen_av)
        outdeg_cen_av = ';'.join([str(round(x, 2)) for x in outdeg_cen_av])
        outdeg_cen_maxmax = max(outdeg_cen_max)
        outdeg_cen_max = ';'.join([str(round(x, 2)) for x in outdeg_cen_max])
        outdeg_cen_minmin = min(outdeg_cen_min)
        outdeg_cen_min = ';'.join([str(round(x, 2)) for x in outdeg_cen_min])
        bet_cen_avav = np.average(bet_cen_av)
        bet_cen_av = ';'.join([str(round(x, 2)) for x in bet_cen_av])
        bet_cen_maxmax = max(bet_cen_max)
        bet_cen_max = ';'.join([str(round(x, 2)) for x in bet_cen_max])
        bet_cen_minmin = min(bet_cen_min)
        bet_cen_min = ';'.join([str(round(x, 2)) for x in bet_cen_min])
        eig_cen_avav = np.average(eig_cen_av)
        eig_cen_av = ';'.join([str(round(x, 2)) for x in eig_cen_av])
        eig_cen_maxmax = max(eig_cen_max)
        eig_cen_max = ';'.join([str(round(x, 2)) for x in eig_cen_max])
        eig_cen_minmin = min(eig_cen_min)
        eig_cen_min = ';'.join([str(round(x, 2)) for x in eig_cen_min])
        triangles_avav = np.average(triangles_av)
        triangles_av = ';'.join([str(x) for x in triangles_av])
        triangles_maxmax = max(triangles_max)
        triangles_max = ';'.join([str(x) for x in triangles_max])
        triangles_minmin = min(triangles_min)
        triangles_min = ';'.join([str(x) for x in triangles_min])
        transitivity_av = np.average(transitivity)
        transitivity_max = max(transitivity)
        transitivity_min = min(transitivity)
        transitivity = ';'.join([str(x) for x in transitivity])
        squares_avav = np.average(squares_av)
        squares_maxmax = max(squares_max)
        squares_minmin = min(squares_min)
        squares_av = ';'.join([str(x) for x in squares_av])
        squares_max = ';'.join([str(x) for x in squares_max])
        squares_min = ';'.join([str(x) for x in squares_min])
        rc_av = np.average(rc)
        rc_max = max(rc)
        rc_min = min(rc)
        rc = ';'.join([str(x) for x in rc])
        ln = [loopnumber[x] for x in np.nonzero(loopnumber)[0]]
        if any(ln):
            loopnumber_av = np.average(ln)
        else:
            loopnumber_av = 0.0
        loopnumber = ';'.join([str(x) for x in loopnumber])

        # check.. sum of iscyc, isscc, unicel, dag,tree, chain should be the total number of components
        if num_comp != (iscyc + isscc + unicel + isdag + istree + ischain):
            print('Number of components is wrong!!!!!!')
            print(num_comp)
            print([iscyc, isscc, unicel, isdag, istree, ischain])
            sys.exit()

        properties.append(indeg_by_comp)  # string
        properties.append(outdeg_by_comp)  #string
        properties.append(ischain)  #int
        properties.append(istree)  #int
        properties.append(isdag)  #int
        properties.append(unicel)  #int
        properties.append(isscc)  #int
        properties.append(iscyc)  #int
        properties.append(iseul)  #int
        properties.append(loopnumber_av)  #float
        properties.append(loopnumber)  #string
        properties.append(node_conn)  #string
        properties.append(avav_clust)  #float
        properties.append(av_clust)  #string
        properties.append(av_assort)  #float
        properties.append(assort)  #string
        properties.append(indeg_cen_avav)  #float
        properties.append(indeg_cen_av)  #string
        properties.append(indeg_cen_maxmax)  #float
        properties.append(indeg_cen_max)  #string
        properties.append(indeg_cen_minmin)  #float
        properties.append(indeg_cen_min)  #string
        properties.append(outdeg_cen_avav)  #float
        properties.append(outdeg_cen_av)  #string
        properties.append(outdeg_cen_maxmax)  #float
        properties.append(outdeg_cen_max)  #string
        properties.append(outdeg_cen_minmin)  #float
        properties.append(outdeg_cen_min)  #string
        properties.append(bet_cen_avav)  #float
        properties.append(bet_cen_av)  #string
        properties.append(bet_cen_maxmax)  #float
        properties.append(bet_cen_max)  #string
        properties.append(bet_cen_minmin)  #float
        properties.append(bet_cen_min)  #string
        properties.append(eig_cen_avav)  #float
        properties.append(eig_cen_av)  #string
        properties.append(eig_cen_maxmax)  #float
        properties.append(eig_cen_max)  #string
        properties.append(eig_cen_minmin)  #float
        properties.append(eig_cen_min)  #string
        properties.append(triangles_avav)  #float
        properties.append(triangles_av)  #string
        properties.append(triangles_maxmax)  #float
        properties.append(triangles_max)  #string
        properties.append(triangles_minmin)  #float
        properties.append(triangles_min)  #string
        properties.append(transitivity_av)  # float
        properties.append(transitivity_max)  #float
        properties.append(transitivity_min)  #float
        properties.append(transitivity)  #string
        properties.append(squares_avav)  #float
        properties.append(squares_av)  #string
        properties.append(squares_maxmax)  #float
        properties.append(squares_max)  #string
        properties.append(squares_minmin)  #float
        properties.append(squares_min)  #string
        properties.append(rc_av)  # float
        properties.append(rc_max)  #float
        properties.append(rc_min)  #float
        properties.append(rc)  #string

        # append more properties.....
        # property 14:

        # property x: in-degree sequence
        #indeg = # list(G.in_degree())[iterate over number of nodes][1]
        # property y: out-degree sequence
        #outdeg = # list(G.in_degree())[iterate over number of nodes][1]
        #.....
    else:
        properties = [0] * 2 + [0.] * 2 + [0] + [''] * 2 + [0] * 7 + [
            0.
        ] + [''] * 2 + [0., ''] * 17 + [0.] * 3 + [''] + [0., ''] * 3 + [
            0., 0., 0., ''
        ]

    # return list of properties
    return properties
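
# --- Hedged usage sketch for get_graph_properties. The `edges` argument is a single
# ';'-separated string of integers that is reshaped into (source, target) pairs, and a
# leading value <= 0 is treated as "no edges". The call below is illustrative only; the
# function also depends on helpers defined elsewhere in this project (int_arr_to_str,
# approx.node_connectivity), on numpy/networkx/time/sys imports, and on a NetworkX
# version compatible with the calls above (e.g. the 2.x API with selfloop_edges()).
props = get_graph_properties("1;2;2;3;3;1")   # a directed 3-cycle: 1 -> 2 -> 3 -> 1
print(props[0])                               # number of connected components (here 1)
print(props[1])                               # number of strongly connected components (here 1)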
示例#40
0
    ### Starting to define spacer arms information and coordinates
    pabaAtoms = getAtomsMol(psfList, 'PAB') # takes paba atoms from psflist
    baseAtoms  = getAtoms(psfList, 'C15', tcolumn=5) # takes C15 (base carbon) atom from each molecule
    upperAtoms = getAtoms(psfList, 'C60', tcolumn=5) # takes C60 (upper carbon) atom from each molecule
    
    ### Starting to define graph and shortests path
    numBonds  = (subprocess.check_output("cat " + psfFile + "| grep NBOND | cut -d \" \" -f4", shell=True))[:-1]
    bondConnects = getBonds(psfList, numBonds) # get bond connections list
    
    G = nx.Graph()
    G.add_edges_from(bondConnects)
    Gsub = G.subgraph(pabaAtoms)
    #nx.draw(Gsub)

    ### Starting to get atom indices and coordinates from the spacer arm    
    comp            = list(Gsub.subgraph(c) for c in nx.connected_components(Gsub))
    graphIndexes    = getSpacerArmGraph(comp,baseAtoms,upperAtoms) #list of atom indexes    
    atomCoordinates = getPABACoordinates(graphIndexes, psfList, pdbList) # numpy Array

    ### Reshape coordinates 
    #reshapeCoordinates(atomCoordinates) # reshape coordinates (3,12) for better visualization
    
    ### Calculate distance and angle between coordinates
    distances = getDistancesAngles(atomCoordinates)
    reshapedCoordinates = reshapeCoordinates(distances,2,11,'2D')
    print reshapedCoordinates
    
        

    #res = abNorm[0] * bcNorm[0] + abNorm[1] * bcNorm[1] + abNorm[2] * bcNorm[2];
    #angle = arccos(res)*180.0/ pi
示例#41
0
    # We should find the largest connected component (lcc) of the network and discard the rest.
    start = timeit.default_timer()
    eta, nu, k, G = get_network(n, nr, na)

    if args.type == 0:
        row, col = np.where(eta[::2] - nu[::2] != 0)
        data = (eta[::2] - nu[::2])[row, col]
        A = csr_matrix((data, (row, col)), shape=(2 * nr, n), dtype=int)
        adj = A.T.dot(A)
        g = nx.convert_matrix.from_scipy_sparse_matrix(adj)

    if args.type == 1:
        g = nx.gnm_random_graph(n, nr, seed=seed)
        adj = nx.adjacency_matrix(g)

    lcc = np.array(list(max(nx.connected_components(g), key=len)))
    n = len(lcc)
    eta = eta[:, lcc]
    nu = nu[:, lcc]
    G = G[lcc]

    X0 = np.exp(-G)
    r = n
    if rank:
        r = np.linalg.matrix_rank(adj.toarray()[np.ix_(lcc, lcc)])
        if output:
            print("rank is ", r, "lcc is ", n)

    XD1, XD2, inds = get_drive(eta, nu, k, G, d0, nd)

    bif = -3
示例#42
0
def cut_threshold(labels, rag, thresh, in_place=True):
    """Combine regions separated by weight less than threshold.

    Given an image's labels and its RAG, output new labels by
    combining regions whose nodes are separated by a weight less
    than the given threshold.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. Regions connected by edges with smaller weights are
        combined.
    in_place : bool
        If set, modifies `rag` in place. The function will remove the edges
        with weights less that `thresh`. If set to `False` the function
        makes a copy of `rag` before proceeding.

    Returns
    -------
    out : ndarray
        The new labelled array.

    Examples
    --------
    >>> from skimage import data, segmentation
    >>> from skimage.future import graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels)
    >>> new_labels = graph.cut_threshold(labels, rag, 10)

    References
    ----------
    .. [1] Alain Tremeau and Philippe Colantoni
           "Regions Adjacency Graph Applied To Color Image Segmentation"
           :DOI:`10.1109/83.841950`

    """
    if not in_place:
        rag = rag.copy()

    # Because deleting edges while iterating through them produces an error.
    to_remove = [(x, y) for x, y, d in rag.edges(data=True)
                 if d['weight'] >= thresh]
    rag.remove_edges_from(to_remove)

    comps = nx.connected_components(rag)

    # We construct an array which can map old labels to the new ones.
    # All the labels within a connected component are assigned to a single
    # label in the output.
    map_array = np.arange(labels.max() + 1, dtype=labels.dtype)
    for i, nodes in enumerate(comps):
        for node in nodes:
            for label in rag.nodes[node]['labels']:
                map_array[label] = i

    return map_array[labels]
示例#43
0
    def _get_components(self):
        """Return connected components (as sorted numpy arrays), sorted by size."""
        return [
            np.sort(list(component))
            for component in NX.connected_components(self.graph)
        ]
degree_sequence = sorted([d for n, d in G.degree()], reverse=True)
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())

fig, ax = plt.subplots()
plt.bar(deg, cnt, width=0.80, color='b')

plt.title("Degree Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")
ax.set_xticks([d + 0.4 for d in deg])
ax.set_xticklabels(deg)

plt.axes([0.4, 0.4, 0.5, 0.5])
Gcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])
pos = nx.spring_layout(G)
plt.axis('off')
nx.draw_networkx_nodes(G, pos, node_size=20)
nx.draw_networkx_edges(G, pos, alpha=0.4)

plt.show()


print("ne = {}".format(ne))
print("node = {}, edge = {}".format(n, sum(degree_sequence)/2))


r_R = 0.5
resource_rate = r_R
R = float(lbs.sum() * r_R)
示例#45
0
def part2blob(graph, pop_col, pop_target, epsilon):

    h = graph.copy()

    start = random.choice(list(h.nodes()))
    clusters = {x: [{x}, graph.nodes[x][pop_col]] for x in graph.nodes()}

    neighbors = list(h.neighbors(start))

    while clusters[start][1] < pop_target - epsilon * pop_target:

        print(clusters[start][1] / pop_target)

        if neighbors == []:
            neighbors = list(h.neighbors(start))

        for neighbor in neighbors:

            cpop = clusters[start][1] + clusters[neighbor][1]

            if cpop < pop_target + epsilon * pop_target:
                k = h.copy()
                k.remove_node(start)
                k.remove_node(neighbor)
                print(len(k.nodes()))
                if nx.is_connected(k):
                    print(True, True)
                    h = nx.contracted_edge(h, (start, neighbor),
                                           self_loops=False)
                    clusters[start][0] = clusters[start][0].union(
                        clusters[neighbor][0])
                    clusters[start][1] = cpop
                    clusters.pop(neighbor)
                else:
                    print(True, False)

                    cc = list(nx.connected_components(k))
                    tsums = []
                    for l in cc:
                        tsums.append(0)
                        for n in l:
                            #print(removed)
                            tsums[-1] = tsums[-1] + graph.nodes[n][pop_col]

                    val, idx = min(
                        (val, idx) for (idx, val) in enumerate(tsums))

                    l = cc[idx]

                    for n in l:
                        #print(n)
                        h.add_edge(neighbor, n)
                        #h.nodes[neighbor][pop_col]+=h.nodes[n][pop_col]
                        clusters[neighbor][
                            1] = clusters[neighbor][1] + clusters[n][1]
                        h = nx.contracted_edge(h, (neighbor, n),
                                               self_loops=False)
                        clusters[neighbor][0] = clusters[neighbor][0].union(
                            clusters[n][0])
                        clusters.pop(n)
                        if n in neighbors:
                            neighbors.remove(n)

            else:
                print(False, False)
                h.remove_edge(start, neighbor)
            neighbors.remove(neighbor)

    cd2 = {}
    cd2[start] = clusters[start][0]
    cd2[-1] = []
    for node in graph.nodes():
        if node not in cd2[start]:
            cd2[-1].append(node)

    cd2[-1] = set(cd2[-1])
    return cd2
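
# --- Hedged usage sketch for part2blob. It expects a connected graph whose nodes carry a
# population attribute and grows a single blob from a random seed until its population is
# within `epsilon` of `pop_target`; the function itself assumes `random` and `networkx`
# are imported. The grid graph, attribute name and target below are illustrative only.
import networkx as nx

grid = nx.grid_2d_graph(6, 6)
for node in grid.nodes():
    grid.nodes[node]['pop'] = 1                       # uniform population of 1 per node
parts = part2blob(grid, pop_col='pop', pop_target=9, epsilon=0.1)
# `parts` maps the seed node to its blob's node set and -1 to all remaining nodes.
print({key: len(nodes) for key, nodes in parts.items()})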
示例#46
0
flat_list = [item for sublist in lista_atores for item in sublist]
atores = sorted(set(list(filter(None, flat_list))))

G = nx.Graph()

G.add_nodes_from(atores)

# adding the edges (connecting actors that share a cast)
for linha in rows:
    cast_list = [x.strip() for x in linha[4].split(',')]
    for i in range(len(cast_list)):
        for j in range(i + 1, len(cast_list)):
            #           G.add_edge(cast_list[i], cast_list[j], weight=float(linha[-1]))
            G.add_edge(cast_list[i], cast_list[j])

largest_cc = max(nx.connected_components(G), key=len)
GCC = G.subgraph(largest_cc)

# To obtain the graph with the nodes outside the main component
#G = G.subgraph(set(G.nodes()).difference(largest_cc))

###################################################################
# Network and node metrics
###################################################################
nx.density(GCC)
nx.average_clustering(GCC)
triadic_closure = nx.transitivity(GCC)
nx.graph_number_of_cliques(GCC)
nx.diameter(GCC)
nx.average_shortest_path_length(GCC)
nx.number_connected_components(G)
示例#47
0
def dissolve_waterbodies(df, joins):
    """Dissolve waterbodies that overlap, duplicate, or otherwise touch each other.

    WARNING: some adjacent waterbodies are divided by dams, etc.  These will need to be
    accounted for later when snapping dams.

    Parameters
    ----------
    df : GeoDataFrame
        waterbodies
    joins : DataFrame
        waterbody / flowline joins

    Returns
    -------
    tuple of (GeoDataFrame, DataFrame)
        (waterbodies, waterbody joins)
    """

    ### Join waterbodies to themselves to find overlaps
    start = time()
    to_agg = pd.DataFrame(sjoin(df.geometry, df.geometry))

    # drop the self-intersections
    to_agg = to_agg.loc[to_agg.index != to_agg.index_right].copy()
    print("Found {:,} waterbodies that touch or overlap".format(
        len(to_agg.index.unique())))

    if len(to_agg):
        # Use network (mathematical, not aquatic) adjacency analysis
        # to identify all sets of waterbodies that touch.
        # Construct an identity map from all wbIDs to their newID (will be new wbID after dissolve)
        grouped = to_agg.groupby(level=0).index_right.unique()
        network = nx.from_pandas_edgelist(
            grouped.explode().reset_index().rename(columns={
                "wbID": "index",
                "index_right": "wbID"
            }),
            "index",
            "wbID",
        )

        components = pd.Series(nx.connected_components(network)).apply(list)
        groups = pd.DataFrame(components.explode().rename("wbID"))

        next_id = df.index.max() + 1
        groups["group"] = (next_id + groups.index).astype("uint32")
        groups = groups.set_index("wbID")

        # assign group to polygons to aggregate
        to_agg = (to_agg.join(groups).reset_index().drop(
            columns=["index_right"]).drop_duplicates().set_index("wbID").join(
                df[["geometry", "FType"]]))

        ### Dissolve groups
        # Buffer geometries slightly to make sure that any which intersect actually overlap
        print("Buffering {:,} unique waterbodies before dissolving...".format(
            len(to_agg)))
        buffer_start = time()
        # TODO: use pg, and simplify since this creates a large number of vertices by default
        to_agg["geometry"] = pg.simplify(
            pg.buffer(to_agg.geometry, 0.1, quadsegs=1), 0.1)
        print("Buffer completed in {:.2f}s".format(time() - buffer_start))

        print("Dissolving...")
        dissolve_start = time()

        # NOTE: automatically takes the first FType
        # dissolved = to_agg.dissolve(by="group").reset_index(drop=True)
        dissolved = dissolve(to_agg, by="group")

        errors = (pg.get_type_id(
            dissolved.geometry) == pg.GeometryType.MULTIPOLYGON.value)
        if errors.max():
            print(
                "WARNING: Dissolve created {:,} multipolygons, these will cause errors later!"
                .format(errors.sum()))

        # this may create multipolygons if polygons that are dissolved don't sufficiently share overlapping geometries.
        # for these, we want to retain them as individual polygons
        # dissolved = dissolved.explode().reset_index(drop=True)
        # WARNING: this doesn't work with our logic below for figuring out groups associated with original wbIDs
        # since after exploding, we don't know what wbID went into what group

        # assign new IDs and update fields
        next_id = df.index.max() + 1
        dissolved["wbID"] = (next_id + dissolved.index).astype("uint32")
        dissolved["AreaSqKm"] = (pg.area(dissolved.geometry) *
                                 1e-6).astype("float32")
        dissolved["NHDPlusID"] = 0
        dissolved.NHDPlusID = dissolved.NHDPlusID.astype("uint64")
        dissolved.wbID = dissolved.wbID.astype("uint32")

        print(
            "Dissolved {:,} adjacent polygons into {:,} new polygons in {:.2f}s"
            .format(len(to_agg), len(dissolved),
                    time() - dissolve_start))

        # remove waterbodies that were dissolved, and append the result
        # of the dissolve
        df = (df.loc[~df.index.isin(to_agg.index)].reset_index().append(
            dissolved, ignore_index=True, sort=False).set_index("wbID"))

        # update joins
        ix = joins.loc[joins.wbID.isin(groups.index)].index

        # NOTE: this mapping will not work if explode() is used above
        joins.loc[ix, "wbID"] = joins.loc[ix].wbID.map(groups.group)

        # Group together ones that were dissolved above
        joins = joins.drop_duplicates().reset_index(drop=True)

    print("Done resolving overlapping waterbodies in {:.2f}s".format(time() -
                                                                     start))

    return df, joins
示例#48
0
def Graph_Statistic_Attributes():
    fname1 = 'F:/Link_Prediction_Code/Dataset/1-Topology_Train.txt'
    fname2 = 'F:/Link_Prediction_Code/Dataset/1-Topology_Train_Regular.txt'
    #Get edge from txt type data
    try:
        fdobj = open(fname1, 'r')
        fwobj = open(fname2, 'w')
    except IOError as e:
        print "***file open error:", e
    else:
        G = nx.Graph()

        eline = fdobj.readline()
        while eline:
            line = eline.strip().split()
            #edge = (line[0],line[1],line[2],line[3])
            #Edge.append(tep)
            if G.has_edge(string.atoi(line[0]), string.atoi(line[1])):
                G[string.atoi(line[0])][string.atoi(
                    line[1])]['weight'] = G[string.atoi(line[0])][string.atoi(
                        line[1])]['weight'] + string.atof(line[2])
                if G[string.atoi(line[0])][string.atoi(
                        line[1])]['timestamp'] > string.atof(line[3]):
                    G[string.atoi(line[0])][string.atoi(
                        line[1])]['timestamp'] = string.atof(line[3])
            else:
                G.add_edge(string.atoi(line[0]),
                           string.atoi(line[1]),
                           weight=string.atof(line[2]),
                           timestamp=string.atof(line[3]))
            eline = fdobj.readline()
        #end while

    #print nx.is_connected(G)
    #print nx.number_connected_components(G)

    ### Get the Greatest Component of Networks #####
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    for i in range(1, len(components)):
        for node in components[i]:
            G.remove_node(node)
    #end for
    print nx.is_connected(G)

    ####Statistic attributes of graphs##
    print "N", nx.number_of_nodes(G)
    print "M", nx.number_of_edges(G)
    print "C", nx.average_clustering(G)
    print "Cw", nx.average_clustering(G, weight='weight')
    print "<d>", nx.average_shortest_path_length(G)
    print "r", nx.degree_assortativity_coefficient(G)
    #print nx.density(G)
    #print nx.transitivity(G)
    degree_list = list(G.degree_iter())
    #print degree_list
    avg_degree_1 = 0.0
    avg_degree_2 = 0.0
    for node in degree_list:
        avg_degree_1 = avg_degree_1 + node[1]
        avg_degree_2 = avg_degree_2 + node[1] * node[1]
    avg_degree = avg_degree_1 / len(degree_list)
    avg_degree_square = (avg_degree_2 / len(degree_list)) / (avg_degree *
                                                             avg_degree)
    print "<k>", avg_degree
    print "H", avg_degree_square

    # Normalize the edge weight and timestamp to the range (0, 1)
    E = G.edges(data=True)
    TimeSet = []
    for i in range(0, len(E)):
        TimeSet.append(E[i][2]['timestamp'])
    t_min = min(TimeSet)
    t_max = max(TimeSet)
    #print t_min, t_max
    for i in range(0, len(E)):
        fwobj.write(
            str(E[i][0]) + " " + str(E[i][1]) + " " + str(E[i][2]['weight']) +
            " " + str(math.exp((-1 / E[i][2]['weight']))) + " " +
            str(E[i][2]['timestamp']) + " " +
            str((E[i][2]['timestamp'] - t_min) / (t_max - t_min)) + '\n')

    fwobj.close()
    fdobj.close()
    return 1
示例#49
0
import networkx as nx
import matplotlib.pyplot as plt
from collections import Counter

G = nx.read_edgelist('data/imdb_edges.tsv', delimiter='\t')

G1 = nx.read_edgelist('data/actor_edges.tsv', delimiter='\t')

nodes = [d for _, d in G1.degree()]
plt.hist(nodes,bins=25)
plt.xlim(0,200)
plt.show()

Counter(nx.degree_centrality(G)).most_common(5)

len(list(nx.connected_components(G1)))


size = [len(c) for c in nx.connected_components(G1)]

plt.hist(size[1:])

G2 = nx.read_edgelist('data/small_actor_edges.tsv', delimiter='\t')

len(list(nx.connected_components(G2)))

Counter(nx.degree_centrality(G2)).most_common(5)

Counter(nx.betweenness_centrality(G2)).most_common(5)

示例#50
0
    def partial_cluster(self,
                        method='sca',
                        threshold=0.45,
                        scale=0.5,
                        factor=0.3,
                        restricted_chars='_T',
                        mode='overlap',
                        cluster_method='infomap',
                        gop=-1,
                        restriction='',
                        ref='',
                        external_function=None,
                        split_on_tones=True,
                        **keywords):
        """
        Cluster the words into partial cognate sets.

        Function for flat clustering of words into cognate sets.

        Parameters
        ----------
        method : {'sca','lexstat','edit-dist','turchin'} (default='sca')
            Select the method that shall be used for the calculation.
        cluster_method : {'upgma','single','complete','ward','mcl','infomap'} (default='infomap')
            Select the cluster method. 'upgma' (:evobib:`Sokal1958`) refers to
            average linkage clustering, 'mcl' refers to the "Markov Clustering
            Algorithm" (:evobib:`Dongen2000`).
        threshold : float (default=0.45)
            Select the threshold for the cluster approach. If set to c{False},
            an automatic threshold will be calculated by calculating the
            average distance of unrelated sequences (use with care).
        scale : float (default=0.5)
            Select the scale for the gap extension penalty.
        factor : float (default=0.3)
            Select the factor for extra scores for identical prosodic segments.
        restricted_chars : str (default="_T")
            Select the restricted chars (boundary markers) in the prosodic
            strings in order to enable secondary alignment.
        mode : {'global','local','overlap','dialign'} (default='overlap')
            Select the mode for the alignment analysis.
        verbose : bool (default=False)
            Define whether verbose output should be used or not.
        gop : int (default=-1)
            If 'sca' is selected as a method, define the gap opening penalty.
        restriction : {'cv'} (default="")
            Specify the restriction for calculations using the edit-distance.
            Currently, only "cv" is supported. If *edit-dist* is selected as
            *method* and *restriction* is set to *cv*, consonant-vowel matches
            will be prohibited in the calculations and the edit distance will
            be normalized by the length of the alignment rather than the length
            of the longest sequence, as described in :evobib:`Heeringa2006`.
        inflation : {int, float} (default=2)
            Specify the inflation parameter for the use of the MCL algorithm.
        expansion : int (default=2)
            Specify the expansion parameter for the use of the MCL algorithm.
        
        """
        kw = dict(imap_mode=True,
                  post_processing=False,
                  inflation=2,
                  expansion=2,
                  max_steps=1000,
                  add_self_loops=True,
                  sep=lingpy.settings.rcParams['morpheme_separator'],
                  word_sep=lingpy.settings.rcParams['word_separator'],
                  word_seps=lingpy.settings.rcParams['word_separators'],
                  seps=lingpy.settings.rcParams['morpheme_separators'],
                  mcl_logs=lambda x: -np.log2((1 - x)**2))
        kw.update(keywords)

        # check for parameters and add clustering, in order to make sure that
        # analyses are not repeated
        if not hasattr(self, 'params'):
            self.params = {}
        self.params['partial_cluster'] = "{0}_{1}_{2:.2f}".format(
            method, cluster_method, threshold)
        self._stamp += '# Partial Cluster: ' + self.params['partial_cluster']

        matrices = self._get_partial_matrices(
            method=method,
            scale=scale,
            factor=factor,
            restricted_chars=restricted_chars,
            mode=mode,
            gop=gop,
            imap_mode=kw['imap_mode'],
            split_on_tones=split_on_tones)
        k = 0
        C = defaultdict(list)  # stores the pcogids
        G = {}  # stores the graphs
        with pb(desc='PARTIAL SEQUENCE CLUSTERING',
                total=len(self.rows)) as progress:
            for concept, trace, matrix in matrices:
                progress.update(1)
                lingpy.log.info('Analyzing concept {0}...'.format(concept))
                if external_function:
                    c = external_function(threshold,
                                          matrix,
                                          taxa=list(range(len(matrix))),
                                          revert=True)
                elif cluster_method == 'infomap':
                    c = extra.infomap_clustering(threshold,
                                                 matrix,
                                                 taxa=list(range(len(matrix))),
                                                 revert=True)
                elif cluster_method == 'mcl':
                    c = clustering.mcl(threshold,
                                       matrix,
                                       taxa=list(range(len(matrix))),
                                       max_steps=kw['max_steps'],
                                       inflation=kw['inflation'],
                                       expansion=kw['expansion'],
                                       add_self_loops=kw['add_self_loops'],
                                       logs=kw['mcl_logs'],
                                       revert=True)
                elif cluster_method in ['upgma', 'single', 'complete', 'ward']:
                    c = clustering.flat_cluster(cluster_method,
                                                threshold,
                                                matrix,
                                                revert=True)
                else:
                    raise ValueError("No suitable cluster method specified.")

                for i, (idx, pos, slc) in enumerate(trace):
                    C[idx] += [c[i] + k]
                if kw['post_processing']:
                    _g = nx.Graph()
                    for i, (idx, pos, slc) in enumerate(trace):
                        _g.add_node((i, idx, pos))
                    remove_edges = []
                    for (i, n1), (j, n2) in combinations2(enumerate(_g.nodes())):
                        if C[n1[1]][n1[2]] == C[n2[1]][n2[2]]:
                            _g.add_edge(n1, n2)
                            if n1[1] == n2[1]:
                                # get scores for n1 and n2 with all the rest in
                                # the matrix to decide for one
                                sn1, sn2 = 0, 0
                                for i, row in enumerate(matrix):
                                    sn1 += matrix[i][n1[0]]
                                    sn2 += matrix[i][n2[0]]
                                sn1 = sn1 / len(matrix)
                                sn2 = sn2 / len(matrix)
                                if sn1 <= sn2:
                                    remove_edges += [n2]
                                else:
                                    remove_edges += [n1]
                    for node in remove_edges:
                        for edge in sorted(_g[node]):
                            _g.remove_edge(node, edge)

                    for i, coms in enumerate(nx.connected_components(_g)):
                        cogid = i + 1 + k
                        for j, idx, pos in coms:
                            C[idx][pos] = cogid

                    G[concept] = _g

                k += max(c.values())
        self.add_entries(ref or self._partials, C, lambda x: x)
        self.graphs = G
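
# --- Hedged usage sketch, assuming this method belongs to lingpy's Partial class (the
# lingpy imports used above suggest so). Partial cognate identifiers are written to the
# column named by `ref`; the file name and column name below are illustrative only.
from lingpy.compare.partial import Partial

part = Partial('wordlist.tsv')                   # wordlist with segmented transcriptions
part.partial_cluster(method='sca', threshold=0.45,
                     cluster_method='upgma', ref='cogids')
part.output('tsv', filename='wordlist-partial')  # writes the new 'cogids' column to disk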
示例#51
0
fb_dict = nx.degree(g)

###### Loop it through and output
for node in fb_dict:
	if fb_dict[node] > max_degree:
		max_degree = fb_dict[node]
		friend = g.node[node]['name']

print friend, "has the most common friends with you:", max_degree, "in common"


#### Two fb friends with most common friends (with most neighbors)
# ??? CONNECTIVITY??? 

#### Components in the ego network
print "There are", len(nx.connected_components(g)), "connected components in the network"
#### the following prints the subgraphs
# nx.connected_component_subgraphs(g)


### Small world coefficient (transitivity/diameter)
##### Extract the largest subgraph (first one?)
max_size = 0
biggest = -1

for i,component in enumerate(nx.connected_components(g)):
#	print "len:", len(component), "maxsize:", max_size, "i:", i, "biggest:", biggest
	if len(component) > max_size:
		max_size = len(component)
		biggest = i
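
# --- Hedged sketch (same two steps with a current NetworkX idiom, written in the same
# Python 2 style as the script above): take the largest connected component directly and
# compute the transitivity/diameter ratio that the comment above calls a small-world
# coefficient. Assumes `g` is the ego network built earlier and that its largest
# component has more than one node (otherwise the diameter is 0).
import networkx as nx

largest_nodes = max(nx.connected_components(g), key=len)
giant = g.subgraph(largest_nodes).copy()
small_world_coeff = nx.transitivity(giant) / nx.diameter(giant)
print "small world coefficient:", small_world_coeff
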
def create_graph(mesh, centroids, normals, robot_pos,
                 traversal_tresh=35, bumpiness_tresh=0.37, dbscan_eps=3, dbscan_min_samples=2):
    """

    :param mesh:
    :param centroids:
    :param normals:
    :param closer_centroid_idx:
    :param traversal_tresh:
    :param dbscan_eps:
    :param dbscan_min_samples:
    :return:
    """
    print("Creating Graph... num faces:", mesh.num_faces)

    frontiers = extract_frontiers(mesh)
    print("Found ", frontiers, "frontiers")

    G = nx.Graph()

    for face_idx in xrange(mesh.num_faces):
        face = mesh.faces[face_idx]

        face_inclination = graph_search.MeshGraphSearch.calculate_traversal_angle(normals[face_idx])
        # if 0 <= face_inclination <= traversal_tresh or 180 - traversal_tresh <= face_inclination <= 180:
        if traversal_tresh < face_inclination < 180 - traversal_tresh:
            continue

        G.add_node(face_idx)

    for face_idx in list(G.nodes()):
        face_vertexes = mesh.faces[face_idx]
        for v in face_vertexes:
            vertex_adj_faces = mesh.get_vertex_adjacent_faces(v)
            for face_adjacent in vertex_adj_faces:
                if face_adjacent != face_idx and G.has_node(face_adjacent):
                    G.add_edge(face_idx, face_adjacent, weight=1)

    #print "G edge_list:", len(list(G.edges())), sorted(list(G.edges()))

    # remove small connected components
    for component in list(nx.connected_components(G)):
        if len(component) < 3:
            for node in component:
                G.remove_node(node)

    g_centroids = [(centroids[v][0], centroids[v][1], centroids[v][2]) for v in sorted(G.nodes())]
    centroid_g_dict = {i: v for i, v in enumerate(sorted(G.nodes()))}

    closer_centroid_idx = mesh_planner.mesh_helper.find_closer_centroid(g_centroids, robot_pos,
                                                                        force_return_closer=True)
    conn_nodes = nx.node_connected_component(G, centroid_g_dict[closer_centroid_idx])
    Gconn = G.subgraph(conn_nodes).copy()

    kdtree = spatial.KDTree(g_centroids)
    pairs = kdtree.query_pairs(bumpiness_tresh)
    print "pairs:", len(pairs), pairs

    joined_by_bumpiness_nodes = set()
    for pair in pairs:
        p1_conn = centroid_g_dict[pair[0]]  # node inside the biggest connected component
        p2_out = centroid_g_dict[pair[1]]  # node outside of the biggest connected component

        # if edge is already mapped, then drop it
        if Gconn.has_edge(p1_conn, p2_out) or Gconn.has_edge(p2_out, p1_conn):
            continue

        # if edge is only connecting inside the Gconn drop it
        if Gconn.has_node(p1_conn) and Gconn.has_node(p2_out):
            continue

        # if edge is not connecting Gconn with other connected elements drop it
        if not Gconn.has_node(p1_conn) and not Gconn.has_node(p2_out):
            continue

        if p1_conn not in Gconn.nodes():
            p1_conn, p2_out = p2_out, p1_conn

        # if there is already a connection between the outside element and Gconn
        intersecting_gconn_nodes = list(set(G.neighbors(p2_out)).intersection(Gconn.nodes()))
        if len(intersecting_gconn_nodes) > 1:
            continue

        # this node is an another connected subgraph
        # add this node and the other ones of the subgraph
        # if not Gconn.has_node(p2_out):
        #     small_component = nx.node_connected_component(G, p2_out)
        #     for n in small_component:
        #         if not Gconn.has_node(n):
        #             Gconn.add_node(n)

        small_comp_nodes = nx.node_connected_component(G, p2_out)
        Gsmall = G.subgraph(small_comp_nodes).copy()
        GconnTemp = Gconn.copy()
        Gconn = nx.compose(Gconn, Gsmall)

        for pair2 in pairs:
            pair2_1 = centroid_g_dict[pair2[0]]
            pair2_2 = centroid_g_dict[pair2[1]]

            if (Gsmall.has_node(pair2_1) and GconnTemp.has_node(pair2_2)) or \
                (GconnTemp.has_node(pair2_1) and Gsmall.has_node(pair2_2)):

                gconn_node = pair2_1
                outside_node = pair2_2

                if not GconnTemp.has_node(gconn_node):
                    outside_node, gconn_node = gconn_node, outside_node

                # intersecting_gconn_nodes = list(set(Gconn.neighbors(gconn_node)).intersection(Gsmall.nodes()))
                # if len(intersecting_gconn_nodes) > 0:
                #     continue

                Gconn.add_edge(pair2_1, pair2_2, weight=1)
                joined_by_bumpiness_nodes.add(pair2_1)
                joined_by_bumpiness_nodes.add(pair2_2)

                # a = np.asarray(centroids[pair2_1])
                # b = np.asarray(centroids[pair2_2])
                #
                # print("dist:", np.linalg.norm(a - b))

    # add remaining edges of the new component from the original graph
    for e in G.edges():
        p1_conn = e[0]
        p2_out = e[1]

        # if edge is already mapped, then drop it
        if Gconn.has_edge(p1_conn, p2_out) or Gconn.has_edge(p2_out, p1_conn):
            continue

        # if edge is only connecting inside the Gconn drop it
        if not p1_conn in Gconn.nodes() and not p2_out in Gconn.nodes():
            continue

        Gconn.add_edge(p1_conn, p2_out, weight=1)

    print "Gconn node_list:", list(Gconn.nodes())

    # numpy array of x,y,z positions in sorted node order
    gcon_centroids = [(centroids[v][0], centroids[v][1], centroids[v][2]) for v in sorted(Gconn.nodes())]
    xyz = np.array(gcon_centroids)
    # scalar colors
    scalars = xyz[:, 2] #np.array(list(Gconn.nodes())) #xyz[:, 2]  #+ 5

    mlab.figure(1, bgcolor=(0, 0, 0))
    mlab.clf()

    pts = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
                        scalars,
                        scale_factor=0.1,
                        scale_mode='none',
                        colormap='Blues',
                        resolution=20)

    # estimate borders of the remainder graph
    border_centroids = []
    # for v in sorted(Gconn.nodes()):
    #     print "info:", v, nx.info(Gconn, v)
    #     if nx.degree(G, v) <= 8 and v not in joined_by_bumpiness_nodes:
    #         border_centroids.append((centroids[v][0], centroids[v][1], centroids[v][2]))

    # xyz_d2 = np.array(centroids_degree_2)
    # print "xyz_d2.shape:", xyz_d2.shape
    # scalars_d2 = np.ones(xyz_d2.shape[0])
    # pts2 = mlab.points3d(xyz_d2[:, 0], xyz_d2[:, 1], xyz_d2[:, 2],
    #                     scalars_d2,
    #                     scale_factor=0.2,
    #                     scale_mode='none',
    #                     color=(1.0, 0.0, 0.0),
    #                     resolution=20)

    centroid_gcon_dict = {v: int(i) for i, v in enumerate(gcon_centroids)}
    print "centroid_gcon_dict:", centroid_gcon_dict.keys()
    edge_list = []
    for e in Gconn.edges():
        e1 = (centroids[e[0]][0], centroids[e[0]][1], centroids[e[0]][2])
        e2 = (centroids[e[1]][0], centroids[e[1]][1], centroids[e[1]][2])
        edge_list.append([centroid_gcon_dict[e1], centroid_gcon_dict[e2]])

    edge_list = np.array(edge_list)
    #edge_list = np.array(list(Gconn.edges()))
    print "edge_list:", edge_list
    pts.mlab_source.dataset.lines = np.array(edge_list)
    #pts.update()
    lines = mlab.pipeline.stripper(pts)
    mlab.pipeline.surface(lines, color=(0.2, 0.4, 0.5), line_width=1.5, opacity=.9)  #colormap='Accent',

    # tube = mlab.pipeline.tube(pts, tube_radius=0.1)
    # mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))

    #print "frontiers:", list(frontiers)
    intersecting_frontiers = list(set(frontiers).intersection(Gconn.nodes()))
    if len(intersecting_frontiers) > 0:
        centroids_frontiers = [centroids[v] for v in intersecting_frontiers]
        db = DBSCAN(eps=dbscan_eps, min_samples=dbscan_min_samples).fit(centroids_frontiers)
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_

        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise_ = list(labels).count(-1)

        rospy.loginfo('Estimated number of clusters: %d, noise_points: %s', n_clusters_, n_noise_)
        unique_labels = set(labels)
        colors = plt.cm.get_cmap('gist_rainbow', len(unique_labels))

        X = np.array(centroids_frontiers)
        for idx, label in enumerate(unique_labels):
            if label == -1:
                # Black used for noise.
                # col = [0, 0, 0, 1]
                continue
            else:
                col = colors(idx)

            #print "col", col

            class_member_mask = (labels == label)
            xyz = X[class_member_mask & core_samples_mask]
            #print "xyz:", xyz

            #fxyz = np.array([centroids[v] for v in intersecting_frontiers])
            pts2 = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
                                color=(col[0], col[1], col[2]),
                                scale_factor=0.15,
                                scale_mode='none',
                                resolution=20)

            # centroid = get_centroid_of_pts(xyz)
            # #print "centroid:", centroid
            # pts3 = mlab.points3d(centroid[:, 0], centroid[:, 1], centroid[:, 2],
            #                      color=(col[0], col[1], col[2]),
            #                      scale_factor=0.5,
            #                      scale_mode='none',
            #                      resolution=20)

    mlab.points3d(robot_pos[0], robot_pos[1], robot_pos[2],
                         color=(1, 1, 1),
                         scale_factor=0.8,
                         scale_mode='none',
                         resolution=20)


    neighbours_ids = sorted(list(set(second_neighbors(Gconn, centroid_g_dict[closer_centroid_idx]))))
    print "centroid_g_dict[closer_centroid_idx]:", centroid_g_dict[closer_centroid_idx], "second_neighbors:", len(neighbours_ids), neighbours_ids

    neighbours_data = []
    rx, ry, rz = centroids[centroid_g_dict[closer_centroid_idx]]
    for idx in neighbours_ids:
        face_inclination = graph_search.MeshGraphSearch.calculate_traversal_angle(normals[idx])
        centroid = centroids[idx]
        cx = centroid[0]
        cy = centroid[1]
        cz = centroid[2]

        mlab.points3d(cx, cy, cz,
                      color=(0.5, 1, 1),
                      scale_factor=0.3,
                      scale_mode='none',
                      resolution=20)

        d = math.sqrt((cx - rx)**2 + (cy - ry)**2 + (cz - rz)**2)
        decay = estimate_decay(d)

        neighbours_data.append({
            'theta': face_inclination,
            'c': centroid,
            'd': d,
            'decay': decay
        })

        print 'theta:', face_inclination, '\tdecay:', decay, '\td:', d

    theta_list = np.array([a['theta'] for a in neighbours_data])
    decay_list = np.array([a['decay'] for a in neighbours_data])
    decay_list[np.abs(decay_list) < 0.001] = 0

    avg, std = weighted_avg_and_std(theta_list, decay_list)
    print "avg:", avg, "std:", std

    #print neighbours_data
    mlab.show()
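
# A minimal sketch of the weighted_avg_and_std helper used above. The helper is
# not shown in this snippet, so this is an assumed implementation: a weighted
# mean and standard deviation computed with numpy (weights must not all be 0).
import math
import numpy as np

def weighted_avg_and_std(values, weights):
    # Weighted mean, then the weighted variance around that mean.
    average = np.average(values, weights=weights)
    variance = np.average((values - average) ** 2, weights=weights)
    return average, math.sqrt(variance)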
Example #53
0
    def _batch_by_shared_component(self, submission, force_field_id):
        """Batches a set of requested properties based on which substance they were
        measured for. Properties which were measured for substances sharing at least
        one common component (defined only by its smiles pattern and not necessarily
        in the same amount) will be placed into the same batch.

        Parameters
        ----------
        submission: EvaluatorClient._Submission
            The full request submission.
        force_field_id: str
            The unique id of the force field to use.

        Returns
        -------
        list of Batch
            The property batches.
        """

        reserved_batch_ids = {
            *self._queued_batches.keys(),
            *self._finished_batches.keys(),
        }

        all_smiles = set(x.smiles for y in submission.dataset.substances
                         for x in y)

        # Build a graph containing all of the different component
        # smiles patterns as nodes.
        substance_graph = networkx.Graph()
        substance_graph.add_nodes_from(all_smiles)

        # Add edges to the graph based on which substances contain
        # the different component nodes.
        for substance in submission.dataset.substances:

            if len(substance) < 2:
                continue

            smiles = [x.smiles for x in substance]

            for smiles_a, smiles_b in zip(smiles, smiles[1:]):
                substance_graph.add_edge(smiles_a, smiles_b)

        # Find clustered islands of those smiles which exist in
        # overlapping substances.
        islands = [
            substance_graph.subgraph(c)
            for c in networkx.connected_components(substance_graph)
        ]

        # Create one batch per island
        batches = []

        for _ in range(len(islands)):

            batch = Batch()
            batch.force_field_id = force_field_id

            # Make sure we don't somehow generate the same uuid
            # twice (although this is very unlikely to ever happen).
            while batch.id in reserved_batch_ids:
                batch.id = str(uuid.uuid4()).replace("-", "")

            batch.options = RequestOptions.parse_json(
                submission.options.json())

            batch.parameter_gradient_keys = copy.deepcopy(
                submission.parameter_gradient_keys)

            reserved_batch_ids.add(batch.id)
            batches.append(batch)

        for physical_property in submission.dataset:

            smiles = [x.smiles for x in physical_property.substance]

            island_id = 0

            for island_id, island in enumerate(islands):

                if not any(x in island for x in smiles):
                    continue

                break

            batches[island_id].queued_properties.append(physical_property)

        return batches
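
    # Hypothetical mini-example (not part of this class) of the island idea
    # above: substances that share a component smiles fall into one connected
    # component and therefore into one batch.
    #
    #   g = networkx.Graph()
    #   g.add_nodes_from(["O", "CCO", "CCCCO"])
    #   g.add_edge("O", "CCO")      # a water + ethanol substance
    #   g.add_edge("O", "CCCCO")    # a water + butanol substance
    #   list(networkx.connected_components(g))
    #   # -> [{'O', 'CCO', 'CCCCO'}], i.e. a single batch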
    """
    if isolates := list(nx.isolates(g)):
        g = g.copy()
        g.remove_nodes_from(isolates)

    if len(g.edges) == 0:
        print('no edges given')
        return

    directed = nx.is_directed(g)

    if largest_component:
        if directed:
            cc = nx.weakly_connected_components(g)
        else:
            cc = nx.connected_components(g)

        cc = sorted(cc, key=len, reverse=True)
        g = g.subgraph(cc[0])

    v = pyvis.network.Network(
            notebook=True,
            width='100%',
            height=height,
            directed=directed
    )

    sizes = [attr.get('size') or attr.get('weight') for (_, attr) in
             g.nodes.items()]

    if all(s is not None for s in sizes):
Example #55
0
def merge_segments(content_segmentation, style_segmentation,
                   semantic_threshold):
    print("Semantic merge of segments started")

    # load color - label mapping
    color_label_dict = load_color_label_dict()
    label_color_dict = {
        label: color
        for color, labels in color_label_dict.items() for label in labels
    }
    colors = color_label_dict.keys()

    # Extract the boolean mask for every color
    content_masks = extract_segmentation_masks(content_segmentation, colors)
    style_masks = extract_segmentation_masks(style_segmentation, colors)

    content_colors = content_masks.keys()
    style_colors = style_masks.keys()

    # Merge all colors that only occur in the style segmentation with the most similar in the content segmentation
    style_masks = merge_difference(style_masks, style_colors, content_colors,
                                   color_label_dict, label_color_dict)
    style_colors = style_masks.keys()

    # Merge all colors that only occur in the content segmentation with the most similar in the style segmentation
    content_masks = merge_difference(content_masks, content_colors,
                                     style_colors, color_label_dict,
                                     label_color_dict)
    content_colors = content_masks.keys()

    #assert style_colors == content_colors

    # Get all colors that are contained in both segmentation images
    intersection = list(set(content_colors).intersection(style_colors))

    # Combine minimal set of colors to compare via semantic similarity
    intersection_colors_to_compare = it.combinations(intersection, 2)

    # Transform colors to labels
    intersection_labels_to_compare = color_tuples_to_label_list_tuples(
        intersection_colors_to_compare, color_label_dict)

    # Add similarity score to label tuples
    annotated_intersection_labels = annotate_label_similarity(
        intersection_labels_to_compare)

    # For labels that are contained in both segmentation images merge only these with a similarity over the threshold
    above_threshold_intersection = [
        (similarity, label_tuple)
        for (similarity, label_tuple) in annotated_intersection_labels
        if similarity >= semantic_threshold
    ]

    # Drop similarity score
    edge_list_labels = [
        label_tuple for similarity, label_tuple in above_threshold_intersection
    ]

    # Turn labels back to colors
    edge_list_colors = [(label_color_dict[l1], label_color_dict[l2])
                        for l1, l2 in edge_list_labels]

    # Find all sub graphs
    color_sub_graphs = list(
        nx.connected_components(nx.from_edgelist(edge_list_colors)))

    # Create a dictionary with all necessary color replacements
    replacement_colors = {
        color: list(color_graph)[0]
        for color_graph in color_sub_graphs for color in color_graph
    }

    new_content_segmentation = replace_colors_in_dict(content_masks,
                                                      replacement_colors)
    new_style_segmentation = replace_colors_in_dict(style_masks,
                                                    replacement_colors)

    assert new_content_segmentation.keys() == new_style_segmentation.keys()

    return new_content_segmentation, new_style_segmentation
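
# Illustrative sketch (not part of the original code) of the merging step in
# merge_segments above: every colour in a connected component of the similarity
# edge list is mapped to a single representative colour (min() is used here as
# a deterministic stand-in for the arbitrary list(color_graph)[0] choice).
#
#   import networkx as nx
#   edges = [((255, 0, 0), (200, 0, 0)), ((200, 0, 0), (180, 0, 0))]
#   subgraphs = nx.connected_components(nx.from_edgelist(edges))
#   replacement = {color: min(group) for group in subgraphs for color in group}
#   # -> all three reds are replaced by (180, 0, 0)
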
def generate(input_data):
    graph = nx.Graph()
    groups_size = [
        random.choice(
            range(input_data['min_group_nodes'],
                  input_data['max_group_nodes'] + 1))
        for i in range(input_data['num_groups'])
    ]
    num_attacker = int(
        sum(groups_size) * input_data['num_attacker_to_num_honest'])
    num_sybil = int(input_data['num_sybil_to_num_attacker'] * num_attacker)
    categories = {
        'Seed': {
            'nodes': [],
            'num': input_data['num_seed_nodes']
        },
        'Honest': {
            'nodes': [],
            'num':
            sum(groups_size) - input_data['num_seed_nodes'] - num_attacker
        },
        'Attacker': {
            'nodes': [],
            'num': num_attacker
        },
        'Sybil': {
            'nodes': [],
            'num': num_sybil
        },
    }
    start_node = input_data.get('start_node', 0)
    counter = start_node
    for category in categories:
        for i in range(categories[category]['num']):
            node = Node(counter, category)
            categories[category]['nodes'].append(node)
            graph.add_node(node)
            counter += 1
    non_sybils = categories['Honest']['nodes'] + \
        categories['Seed']['nodes'] + categories['Attacker']['nodes']
    random.shuffle(non_sybils)
    for group_num, size in enumerate(groups_size):
        group_name = 'group_{0}'.format(group_num)
        start_point = sum(groups_size[:group_num])
        end_point = start_point + size
        groups_nodes = non_sybils[start_point:end_point]
        for node in groups_nodes:
            node.groups.add(group_name)

    groups = set(sum([list(node.groups) for node in non_sybils], []))
    i = 0
    while i < input_data['num_joint_node']:
        joint_node = random.choice(non_sybils)
        other_groups = groups - joint_node.groups
        if len(other_groups) > 0:
            random_group = random.choice(list(other_groups))
            joint_node.groups.add(random_group)
            i += 1

    if input_data['num_seed_groups'] != 0:
        seed_groups = [
            'seed_group_{0}'.format(i)
            for i in range(input_data['num_seed_groups'])
        ]
        for node in categories['Seed']['nodes']:
            node.groups.add(random.choice(seed_groups))

    for group in groups:
        nodes = [node for node in non_sybils if group in node.groups]
        nodes_degree = dict((node, 0) for node in nodes)
        min_degree = int(input_data['min_known_ratio'] * len(nodes))
        avg_degree = int(input_data['avg_known_ratio'] * len(nodes))
        max_degree = min(int(input_data['max_known_ratio'] * len(nodes)),
                         len(nodes) - 1)
        low_degrees = range(
            min_degree,
            avg_degree) if min_degree != avg_degree else [min_degree]
        up_degrees = range(avg_degree, max_degree + 1)
        for i, node in enumerate(nodes):
            group_degree = sum(nodes_degree.values()) / (i + 1)
            if group_degree < avg_degree:
                degree = random.choice(up_degrees)
            else:
                degree = random.choice(low_degrees)
            j = counter = 0
            pairs = []
            while j < degree:
                pair = random.choice(nodes)
                if node != pair and nodes_degree[
                        pair] <= max_degree and pair not in pairs:
                    graph.add_edge(node, pair)
                    pairs.append(pair)
                    j += 1
                    nodes_degree[node] += 1
                else:
                    counter += 1
                    if counter > 100 * degree:
                        # j += 1
                        raise Exception(
                            "Can't find pair. group_degree={}".format(
                                group_degree))
    num_connection_to_attacker = max(
        int(input_data['sybil_to_attackers_con'] *
            categories['Attacker']['num']), 1)
    for i, node in enumerate(categories['Sybil']['nodes']):
        pairs = []
        j = 0
        while j < num_connection_to_attacker:
            pair = random.choice(categories['Attacker']['nodes'])
            if pair not in pairs:
                graph.add_edge(node, pair)
                pairs.append(pair)
                j += 1

    for node in categories['Attacker']['nodes'] + categories['Sybil']['nodes']:
        node.groups.add('attacker')

    # Add inter-group connections
    i = 0
    inter_group_pairs = []
    while i < input_data['num_inter_group_con']:
        node = random.choice(non_sybils)
        pair = random.choice(non_sybils)
        if len(node.groups
               & pair.groups) == 0 and (node, pair) not in inter_group_pairs:
            graph.add_edge(node, pair)
            inter_group_pairs.append((node, pair))
            i += 1
    # sew graph parts together
    if not nx.is_connected(graph):
        components = list(nx.connected_components(graph))
        biggest_comp = max(components, key=len)
        for component in components:
            if component is biggest_comp:
                continue
            connected = False
            i = 0
            while not connected:
                i += 1
                left_node = random.choice(list(component))
                right_node = random.choice(list(biggest_comp))
                if left_node.node_type != 'Sybil' and right_node.node_type != 'Sybil':
                    graph.add_edge(left_node, right_node)
                    print('Add Edge: {0} --> {1}'.format(
                        left_node, right_node))
                    connected = True
                if i > len(biggest_comp):
                    print([
                        '%s %s' % (node.name, node.node_type)
                        for node in component
                    ])
                    raise Exception(
                        "Can't sew the component above to the biggest_comp")
    return graph
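
# A minimal Node sketch matching how generate() uses it above. The real class
# is not shown in this snippet, so this is an assumption for illustration only.
class Node:
    def __init__(self, name, node_type):
        self.name = name
        self.node_type = node_type
        self.groups = set()

    def __repr__(self):
        return '{0} ({1})'.format(self.name, self.node_type)
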
# for n in G.nodes():
#     print(n, G.nodes[n]['birth_year'])
    

#simple network metrics
density = nx.density(G)
print("Network density:", density)

fell_whitehead_path = nx.shortest_path(G, source="Margaret Fell", target="George Whitehead")
print("Shortest path between Fell and Whitehead:", fell_whitehead_path)

print("Length of that path: ", len(fell_whitehead_path)-1)

#finding diameter -> because the network is not connected, the diameter has to be computed on the subgraph of the largest connected component
print(nx.is_connected(G))
components = nx.connected_components(G)
largest_component = max(components, key=len)

subgraph = G.subgraph(largest_component)
diameter = nx.diameter(subgraph)
print("Network diameter of largest component: ", diameter)

triadic_closure = nx.transitivity(G)
print("Triadic closure: ", triadic_closure)

#calculating centrality

degree_dict = dict(G.degree(G.nodes()))
nx.set_node_attributes(G, degree_dict, 'degree')
print(G.nodes['William Penn'])
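
# A common next step (sketch): rank nodes by the degree attribute just set and
# print the most connected people in the network.
sorted_degree = sorted(degree_dict.items(), key=lambda item: item[1], reverse=True)
print("Top 5 nodes by degree:")
for name, degree in sorted_degree[:5]:
    print(name, degree)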
def compute_commitment_diagram(attractors: dict,
                               fname_image: Optional[str] = None,
                               fname_json=None,
                               edge_data=False) -> networkx.DiGraph:
    """
    Computes the commitment diagram for the AttrJson and STG defined in *attractors*, a json object computed by :ref:`AttrJson_compute_json`.
    The nodes of the diagram represent states that can reach the exact same subset of *attractors*.
    Edges indicate the existence of a transition between two nodes in the respective commitment sets.
    Edges are labeled by the number of states of the source set that can reach the target set and,
    if *edge_data* is true, additionally by the size of the border.

    **arguments**:
        * *attractors*: json attractor data, see :ref:`compute_attractors`
        * *fname_image*: generate image for diagram
        * *fname_json*: save diagram as json
        * *edge_data*: toggles computation of additional edge data

    **returns**:
        * *diagram*: the commitment diagram

    **example**::

        >>> attractors = compute_attractors(primes, update)
        >>> diagram = compute_commitment_diagram(attractors)
    """

    primes = attractors["primes"]
    update = attractors["update"]

    subspaces = []
    for x in attractors["attractors"]:
        if x["min_trap_space"]["is_univocal"] and x["min_trap_space"][
                "is_faithful"]:
            subspaces.append(x["min_trap_space"]["dict"])
        else:
            subspaces.append(x["state"]["dict"])

    log.info("Commitment.compute_diagram(..)")

    size_total = size_state_space(primes)

    if len(subspaces) == 1:
        log.info(" single attractor, trivial case.")
        diagram = networkx.DiGraph()
        counter_mc = 0

        diagram.add_node("0")
        diagram.nodes["0"]["attractors"] = subspaces
        diagram.nodes["0"]["size"] = size_total
        diagram.nodes["0"]["formula"] = "TRUE"

    else:
        igraph = primes2igraph(primes)
        outdag = find_outdag(igraph)

        attractor_nodes = [x for A in subspaces for x in A]
        critical_nodes = find_ancestors(igraph, attractor_nodes)
        outdag = [x for x in outdag if x not in critical_nodes]

        igraph.remove_nodes_from(outdag)
        log.info(f"excluding the non-critical out-dag nodes {outdag}")

        components = networkx.connected_components(igraph.to_undirected())
        components = [list(x) for x in components]
        log.info(f"working on {len(components)} connected component(s)")

        counter_mc = 0
        diagrams = []
        for component in components:
            sub_primes = copy_primes(primes)
            remove_all_variables_except(sub_primes, component)
            attractors_projected = _project_attractors(subspaces, component)
            diagram, count = _compute_diagram_component(
                sub_primes, update, attractors_projected, edge_data)
            counter_mc += count
            diagrams.append(diagram)

        factor = 2**len(outdag)
        diagram = _cartesian_product_of_diagrams(diagrams, factor, edge_data)

        for x in attractors:
            diagram.graph[x] = copy_json_data(attractors[x])

        nodes_sum = 0
        for x in diagram.nodes():
            projection = diagram.nodes[x]["attractors"]
            diagram.nodes[x]["attractors"] = _lift_attractors(
                subspaces, projection)
            nodes_sum += diagram.nodes[x]["size"]

        if nodes_sum != size_total:
            log.warning(
                "commitment diagram does not partition the state space; this may be due to rounding of large numbers."
            )

        sorted_ids = sorted(diagram, key=lambda x: diagram.nodes[x]["formula"])
        mapping = {x: str(sorted_ids.index(x)) for x in diagram}
        networkx.relabel_nodes(diagram, mapping, copy=False)

    log.info(f"total executions of NuSMV: {counter_mc}")

    if fname_image:
        commitment_diagram2image(diagram,
                                 fname_image=fname_image,
                                 style_inputs=True,
                                 style_edges=edge_data,
                                 style_ranks=True,
                                 first_index=1)

    if fname_json:
        save_commitment_diagram(diagram, fname_json)

    return diagram
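
# Usage sketch, following the docstring above (the image file name is only an
# example):
#
#   attractors = compute_attractors(primes, update)
#   diagram = compute_commitment_diagram(attractors, fname_image="commitment.pdf")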
Example #59
0
File: graph.py Project: leonsn/vresutils
def giant_component(G, copy=True):
    H = G.subgraph(max(nx.connected_components(G), key=len))
    if copy:
        return H.copy()
    else:
        return H
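
# Usage sketch (assumed caller): keep only the giant component of a graph.
#
#   G = nx.erdos_renyi_graph(100, 0.02)
#   H = giant_component(G)
#   print(H.number_of_nodes())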
Example #60
0
def process_file(f, args):
    g = nx.convert_node_labels_to_integers(nx.read_edgelist(f))
    if not g.is_directed():
        g = g.subgraph(max(nx.connected_components(g), key=len))
        nx.write_edgelist(g, f)
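
# Usage sketch (hypothetical file paths; args is whatever options object the
# surrounding script provides):
#
#   import glob
#   for path in glob.glob("edgelists/*.txt"):
#       process_file(path, args)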