Example #1
    def test_attracting_components(self):
        ac = list(nx.attracting_components(self.G1))
        assert_true({2} in ac)
        assert_true({9} in ac)
        assert_true({10} in ac)

        ac = list(nx.attracting_components(self.G2))
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1, 2)])

        ac = list(nx.attracting_components(self.G3))
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1, 2) in ac)
        assert_true((3, 4) in ac)
        assert_equal(len(ac), 2)
Example #2
    def test_attracting_components(self):
        ac = nx.attracting_components(self.G1)
        assert_true([2] in ac)
        assert_true([9] in ac)
        assert_true([10] in ac)

        ac = nx.attracting_components(self.G2)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1,2)])

        ac = nx.attracting_components(self.G3)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1,2) in ac)
        assert_true((3,4) in ac)
        assert_equal(len(ac), 2)
Example #3
    def test_attracting_components(self):
        ac = nx.attracting_components(self.G1)
        assert_true([2] in ac)
        assert_true([9] in ac)
        assert_true([10] in ac)

        ac = nx.attracting_components(self.G2)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1, 2)])

        ac = nx.attracting_components(self.G3)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1, 2) in ac)
        assert_true((3, 4) in ac)
        assert_equal(len(ac), 2)
Example #4
    def test_attracting_components(self):
        ac = list(nx.attracting_components(self.G1))
        assert_true({2} in ac)
        assert_true({9} in ac)
        assert_true({10} in ac)

        ac = list(nx.attracting_components(self.G2))
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1, 2)])

        ac = list(nx.attracting_components(self.G3))
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1, 2) in ac)
        assert_true((3, 4) in ac)
        assert_equal(len(ac), 2)
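
For reference, a minimal standalone sketch of the behaviour these tests exercise, assuming NetworkX 2.x, where attracting_components is a generator of node sets; the tiny graph below is made up for illustration:

import networkx as nx

# Node 0 drains into the 2-cycle {1, 2}, so {1, 2} is the only attracting component.
G = nx.DiGraph([(0, 1), (1, 2), (2, 1)])

print(list(nx.attracting_components(G)))    # [{1, 2}]
print(nx.number_attracting_components(G))   # 1
print(nx.is_attracting_component(G))        # False: the whole graph is not one attracting component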
Example #5
    def find_components(self):
        peaks = list(nx.attracting_components(self.G))
        # only local maxima
        assert all(len(p) == 1 for p in peaks)
        # return set now?
        peaks = [list(p)[0] for p in peaks]
        components = []
        for peak in peaks:
            nodes = nx.shortest_path(self.G, target=peak).keys()
            components += [nodes]

        self.ambiguous = np.where(
            np.bincount([n for c in components for n in c]) > 1)
        self.ambiguous = set(list(self.ambiguous[0]))

        component_graphs = []
        self.peak_map = {}
        for peak, nodes in zip(peaks, components):
            nodes = set(nodes) - self.ambiguous
            self.peak_map.update({n: peak for n in nodes})
            component_graphs += [self.G.subgraph(nodes)]

        count_reads = lambda x: sum(self.counts[i] for i in x.nodes())
        self.components = sorted(list(component_graphs),
                                 key=count_reads,
                                 reverse=True)
Example #6
def FindAttractors(Counts, folder='Data'):
    '''Identifying attractors and basins using NetworkX'''
    print 'Now identifying attractors, please wait...'
    results = {}
    TransNet = nx.DiGraph()
    for source, target in Counts:  # add source and target node to network objects
        TransNet.add_edge(source, target)
    TransNet.remove_edges_from(TransNet.selfloop_edges())
    attractors = nx.attracting_components(
        TransNet)  # find attractors (SCCs with no outgoing edges)
    ReTransNet = TransNet.reverse(
    )  # reverse the directed graph to create a tree with the attractors as the roots
    try:
        os.mkdir(folder)
    except:
        pass
    print 'Now identifying basins for each attractor...'
    for attractor in attractors:
        basin_tree = nx.dfs_tree(
            ReTransNet, attractor[0]
        )  # the descendants of one attractor state in the reversed graph form the basin
        results[tuple(attractor)] = basin_tree.nodes()
        #results_origin[tuple(attractor)]=[leaf for leaf in basin if ReTransNet.out_degree(leaf) == 0] # record initial states of attractors
        #AttNet=TransNet.subgraph(attractor)
        #nx.write_edgelist(AttNet,'%s/Attractor%s.txt'%(folder,attractors.index(attractor)),data=False)
    print 'Writing out transition graph in %s/TransGraph.txt' % folder
    nx.write_edgelist(TransNet, '%s/TransGraph.txt' % folder, data=False)
    return results
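
The basin computation above (attractors are SCCs with no outgoing edges; reversing the transition graph turns each attractor into the root of its basin) can be sketched self-contained, assuming NetworkX 2.x; the transition list is made up for illustration:

import networkx as nx

transitions = [(0, 1), (1, 2), (2, 2), (3, 2), (4, 5), (5, 4)]   # hypothetical state map
T = nx.DiGraph(transitions)
T.remove_edges_from(list(nx.selfloop_edges(T)))   # drop self-loops, as in the snippet above

R = T.reverse()   # in the reversed graph every attractor state is a root of its basin
basins = {}
for attractor in nx.attracting_components(T):
    seed = next(iter(attractor))                      # any state of the attractor works
    basins[frozenset(attractor)] = set(nx.dfs_tree(R, seed).nodes())

print(basins)   # e.g. {frozenset({2}): {0, 1, 2, 3}, frozenset({4, 5}): {4, 5}}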
Example #7
    def attractors(self, mode='stg'):
        """Find the attractors of the boolean network.

		Args:
			mode (string) : ``stg`` or ``sat``. Defaults to ``stg``.
				``stg``: Uses the full State Transition Graph (STG) and identifies the attractors as strongly connected components.
				``bns``: Uses the SAT-based :mod:`cana.bns` to find all attractors.
		Returns:
			attractors (list) : A list containing all attractors for the boolean network.
		See also:
			:mod:`cana.bns`
		"""
        self._check_compute_variables(stg=True)

        if mode == 'stg':
            self._attractors = [
                list(a) for a in nx.attracting_components(self._stg)
            ]

        elif mode == 'bns':
            self._attractors = bns.attractors(
                self.to_cnet(file=None, adjust_no_input=False))
        else:
            raise AttributeError(
                "Could not find the specified mode. Try 'stg' or 'bns'.")

        self._attractors.sort(key=len, reverse=True)
        return self._attractors
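
A self-contained sketch of the 'stg' route described in the docstring, using a made-up two-node synchronous Boolean network rather than the cana classes; attractors of the network are exactly the attracting components of its state transition graph:

import itertools
import networkx as nx

def step(state):                 # toy update rule: x' = y, y' = x
    x, y = state
    return (y, x)

stg = nx.DiGraph()               # synchronous state transition graph
for state in itertools.product([0, 1], repeat=2):
    stg.add_edge(state, step(state))

attractors = sorted(nx.attracting_components(stg), key=len, reverse=True)
print(attractors)                # [{(0, 1), (1, 0)}, {(0, 0)}, {(1, 1)}], up to ordering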
Example #8
 def test_connected_raise(self):
     G = nx.Graph()
     with pytest.raises(NetworkXNotImplemented):
         next(nx.attracting_components(G))
     pytest.raises(NetworkXNotImplemented, nx.number_attracting_components,
                   G)
     pytest.raises(NetworkXNotImplemented, nx.is_attracting_component, G)
Example #9
 def states(self) :
     """
     Returns the number of states in all attractors.
     """
     if not self._attrs :
          self._attrs = nx.attracting_components(self.stg())
     return reduce(lambda x, y: x + len(y), self._attrs, 0) # equivalent to sum([len(attr) for attr in self._attrs])
Example #10
    def scc_dicts(self) :
        """
        bestimmt SCCs und gibt dictionary Knoten>SCC und SCC>[Knoten] zurueck
        SCCs, die nur aus einem Element bestehen, welches kein Attraktor (= nicht in attr)
        ist, werden in SCC 0 verschoben und tauchen in node2scc nicht auf
        """
        if not self._attrs :
             self._attrs = nx.attracting_components(self.stg())
        sccs=nx.strongly_connected_components(self.stg()) # erzeugt Liste SCC>[Knoten]
        node2scc={}
        scc2nodes={}
        attr_flattened=[item for sublist in [list(x) for x in self._attrs] for item in sublist]
        # Liste durchgehen und a) fuer jeden Knoten SCC und b) fuer jede SCC Knoten speichern
        for (i,nodes) in enumerate(sccs):
            for node in nodes:

                # pruefen, ob Knoten in trivialem SCC liegt und kein Attraktor ist
                if len(nodes)<=1 and (node not in attr_flattened):
                    scc_index=0 # in diesem Fall wird Knoten in SCC 0 verschoben
                else:
                    # ansonsten entspricht die SCC-Nummer dem Index+1
                    # +1, damit Index 0 fuer Sammlung trivialer SCCs zur Verfuegung steht
                    scc_index=i+1

                    node2scc[node]=scc_index # dictionary Knoten>SCC schreiben

                if scc_index not in scc2nodes: # pruefen, ob SCC bereits in dictionary SCC>[Knoten] vorhanden ist
                    scc2nodes[scc_index]=[] # ggf. Eintrag erstellen
                scc2nodes[scc_index].append(node) # und aktuellen Knoten hinzufuegen

        return(node2scc,scc2nodes,sccs)
Example #11
 def attrs(self) :
     """
     Returns the number of attractors.
     """
     if not self._attrs :
          self._attrs = nx.attracting_components(self.stg())
     return len(self._attrs)
Example #12
File: siggi.py Project: hgascon/siggi
def bag_of_attracting_components(graph):
    """ Bag of attracting components """
    # Hack to deal with broken nx implementation
    if len(graph.node) == 0:
        return {}
    comp = nx.attracting_components(graph)
    return __bag_of_components(graph, comp)
Example #13
File: Core.py Project: csbBSSE/CSB-SCLC
def AttractorAnalysis(nodes, StateTraj, inter_mat, folder):
    '''Identifying attractors using NetworkX and writes them to a file'''

    import networkx as nx

    for traj in load_data('traj.f'):
        StateTraj.add_edges_from(traj)
    attractors = list(nx.attracting_components(StateTraj))

    current_dir = os.getcwd()  #current working directory
    path = current_dir + "/OUTPUT/" + folder
    try:
        os.makedirs(path)  #If folder doesn't exist then create it
    except:
        pass

    import xlsxwriter as xlsxwt
    workbook = xlsxwt.Workbook(
        os.path.join('OUTPUT', folder, 'NetworkX_Sync.xls'))
    worksheet = workbook.add_worksheet("stable_states")
    cell_format = workbook.add_format()
    cell_format.set_bg_color('black')

    for i, node in enumerate(nodes):
        worksheet.write(i + 1, 0, node)  #Writing names of the nodes
    worksheet.write(len(nodes) + 2, 0, 'Frustration')
    worksheet.write(len(nodes) + 3, 0, 'Frequency')

    attract_list = []
    j = 0
    for states in attractors:
        for state in states:
            attract_list.append(state)
            if len(states) == 1:
                worksheet.write(0, j + 1, "Fixed Point")
            else:
                worksheet.write(0, j + 1,
                                "{} state oscillator".format(len(states)))
            j += 1

    for i in range(1, len(attract_list) + 1):
        state = num2vect(attract_list[i - 1], len(nodes)).tolist()
        for j, node_value in enumerate(state):
            if node_value == 1:
                worksheet.write(j + 1, i, node_value, cell_format)
            else:
                worksheet.write(j + 1, i, node_value)
        worksheet.write(
            len(nodes) + 2, i, Frustration(attract_list[i - 1], inter_mat))
        worksheet.write(
            len(nodes) + 3, i, StateTraj.degree[attract_list[i - 1]])

    worksheet.set_column(0, len(attractors) + 2, 15)

    workbook.close()
    print(
        "All attractors have been found by NetworkX. Number of attractors found is %s"
        % len(attractors))
    print("Saving these states in %s/NetworkX_Sync.xls\n" % folder)
Example #14
def correlate_node_by_sync( cells ):
    global template_ , avg_
    for m, n in itertools.combinations( cells.nodes( ), 2 ):
        vec1, vec2 = cells.node[m]['timeseries'], cells.node[n]['timeseries']
        corr = sync_index( vec1, vec2 )
        rcorr = sync_index( vec2, vec1 )
        if corr > 0.6:
            cells.add_edge( m, n, weight = corr )
            cells.add_edge( n, m, weight = rcorr )

    outfile = 'final.png' 
    plt.figure( figsize = (12,8) )
    plt.subplot( 2, 2, 1 )
    plt.imshow( avg_, interpolation = 'none', aspect = 'auto' )
    plt.title( 'All frames averaged' )
    plt.colorbar( ) # orientation = 'horizontal' )

    syncImg = np.zeros( shape=template_.shape )
    syncDict = defaultdict( list )
    nx.write_gpickle( cells, 'cells.gpickle' )
    logger.info( 'Logging out after writing to graph.' )
    return 
    try:
        nx.drawing.nx_agraph.write_dot( cells, 'all_cell.dot' )
    except Exception as e:
        logger.warn( 'Failed to write dot file %s' % e )
    for i, c in enumerate( nx.attracting_components( cells ) ):
        if len(c) < 2:
            continue
        logger.info( 'Found attracting component of length %d' % len(c) )
        for p in c:
            cv2.circle( syncImg, (p[1], p[0]), 2, (i+1), 2 )
            # syncDict[str(c)].append( cells.node[p]['timeseries'] )

    plt.subplot( 2, 2, 2 )
    plt.imshow( timeseries_
            , interpolation = 'none', aspect = 'auto', cmap = 'seismic' )
    plt.colorbar(  ) #orientation = 'horizontal' )
    plt.title( 'Activity of each pixel' )

    plt.subplot( 2, 2, 3 )
    plt.imshow( syncImg, interpolation = 'none', aspect = 'auto' )
    plt.colorbar( ) #orientation = 'horizontal' )

    # Here we draw the synchronization.
    plt.subplot( 2, 2, 4 )
    # clusters = []
    # for c in syncDict:
        # clusters += syncDict[c]
        # # Append two empty lines to separate the clusters.
        # clusters += [ np.zeros( timeseries_.shape[1] ) ] 
    # try:
        # plt.imshow( np.vstack(clusters), interpolation = 'none', aspect = 'auto' )
        # plt.colorbar(  ) #orientation = 'horizontal' )
    # except Exception as e:
        # print( "Couldn't plot clusters %s" % e )
    plt.tight_layout( )
    plt.savefig( outfile )
    logger.info( 'Saved to file %s' % outfile )
Example #15
 def cAttrs(self) :
     """
     Returns the number of cyclic attractors.
      FIXED: [cyclic attractors == attractors with more than one state?] YES.
     """
     if not self._attrs :
          self._attrs = nx.attracting_components(self.stg())
     return len([fix for i,fix in enumerate(self._attrs) if len(self._attrs[i])>1])
Example #16
 def fixpoints(self) :
     """
     Returns the number of fixpoints.
      FIXED: [fixpoints == attractors with exactly one state?] YES.
     """
     if not self._attrs :
          self._attrs = nx.attracting_components(self.stg())
     return len([fix for i,fix in enumerate(self._attrs) if len(self._attrs[i])==1])
Example #17
def states_assignments(graph: nx.Graph, density_node):
    dg = graph.to_directed()
    for n1, n2 in list(dg.edges):
        d1 = density_node[n1]
        d2 = density_node[n2]
        if d1 >= d2:
            dg.remove_edge(n1, n2)
    components = nx.attracting_components(dg)
    return components
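
A usage sketch for states_assignments (assuming the function above is in scope), with a made-up density profile on a path graph: every node keeps an outgoing edge toward strictly higher density, so only the density peak survives as an attracting component.

import networkx as nx

path = nx.path_graph(5)                                   # 0 - 1 - 2 - 3 - 4
density = {0: 0.1, 1: 0.4, 2: 0.9, 3: 0.5, 4: 0.2}        # node 2 is the peak

print(list(states_assignments(path, density)))            # [{2}]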
Example #18
    def test_attracting_components(self):
        ac = list(nx.attracting_components(self.G1))
        assert {2} in ac
        assert {9} in ac
        assert {10} in ac

        ac = list(nx.attracting_components(self.G2))
        ac = [tuple(sorted(x)) for x in ac]
        assert ac == [(1, 2)]

        ac = list(nx.attracting_components(self.G3))
        ac = [tuple(sorted(x)) for x in ac]
        assert (1, 2) in ac
        assert (3, 4) in ac
        assert len(ac) == 2

        ac = list(nx.attracting_components(self.G4))
        assert ac == []
Example #19
 def getDestinySTG(self) :
     if not self._attrs :
         self._attrs = nx.attracting_components(self.stg())
     if not self._sccs :
         self._sccs = self.scc_dicts() # (node>SCC , SCC>[nodes])
     if not self._destiny :
         self._destiny = self.compute_destination()
     DestSTG=DSTG.DestinySTG(self.stg(), self._attrs, self._sccs, self._destiny)
     return DestSTG.getDestinySTG()
Example #20
    def computeSCCDAG(self,Isomorphy,NNF,GML=False) :
        if not self._attrs :
            self._attrs = nx.attracting_components(self.stg())
        if not self._sccs :
            self._sccs = self.scc_dicts() # (node>SCC , SCC>[nodes])
        if not self._destiny :
            self._destiny = self.compute_destination()
#        self._sccdag = SCCDAG(self.stg(), compute_nested_networks_for_gml, compute_nested_networks_for_nnf)
        self._sccdag = SCCDAG.SCCDAG(self.stg(), self._attrs, self._sccs, self._destiny, GML, NNF,Isomorphy)
Example #21
def remove_small_attractors(graph, min_size=3):
    """
    Remove all attractors smaller than a given size.

    Return a copy of `graph` with all nodes that were part of attractors
    (scc without outflux) smaller than `min_size` nodes removed.
    """
    graph = graph.copy()
    attractors = (comp for comp in nx.attracting_components(graph)
                  if len(comp) < min_size)
    graph.remove_nodes_from(it.chain.from_iterable(attractors))
    return graph
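
A usage sketch for remove_small_attractors, assuming the function and its `import itertools as it` are in scope; the 2-cycle falls below min_size and is removed, the 4-cycle stays. The graph is made up for illustration:

import networkx as nx

# Node 6 feeds two terminal cycles: a 2-cycle {0, 1} and a 4-cycle {2, 3, 4, 5}.
G = nx.DiGraph([(0, 1), (1, 0), (2, 3), (3, 4), (4, 5), (5, 2), (6, 0), (6, 2)])

H = remove_small_attractors(G, min_size=3)
print(sorted(H.nodes()))   # [2, 3, 4, 5, 6]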
Example #22
    def find_no_motif_attractors(self):
        """Find attractors of the reduction that are not present in any of its
        subreductions.

        """
        if self.partial_STG is None:
            self.build_partial_STG()
        if len(list(self.partial_STG.nodes())) > 0:
            self.no_motif_attractors = list(
                nx.attracting_components(self.partial_STG))
        else:
            self.no_motif_attractors = []
Example #23
 def _remove_satisfied_attracting_components(graph):
     # Remove sets of attracting components where all components are satisfied
     fixed_point_reached = False
     done_something = False
     while not fixed_point_reached:
         fixed_point_reached = True
         for attracting_components in nx.attracting_components(graph):
             if all(c.is_satisfied() for c in attracting_components):
                 graph.remove_nodes_from(attracting_components)
                 fixed_point_reached = False
                 done_something = True
                 break
     return done_something
Example #24
def find_basis_words(wordnet):
    # components = nx.strongly_connected_components(wordnet)
    # scc_graph = nx.condensation(wordnet)

    #now we find sinks of the sccgraph
    sink_sccs = nx.attracting_components(wordnet)

    #now we simply choose a node from all sccsinks, and we can generate all definitions
    basis_words = set()
    for scc in sink_sccs:
        basis_words.add(scc.pop())

    return basis_words
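
A usage sketch for find_basis_words on a made-up "defined-in-terms-of" graph, where an edge u -> v means u's definition uses v; each sink SCC contributes one basis word:

import networkx as nx

wordnet = nx.DiGraph([
    ("recursion", "recursion"),            # self-referential entry: a sink SCC on its own
    ("queue", "list"), ("stack", "list"),
    ("list", "item"), ("item", "list"),    # mutually defined pair: the other sink SCC
])

print(find_basis_words(wordnet))   # e.g. {'recursion', 'item'} (one word per sink SCC)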
Example #25
	def fraction_pinned_attractors(self, pcstg_dict):
		"""Returns the Fraction of successfully Pinned Attractors
		Args:
			pcstg_dict (dict of networkx.DiGraph) : The dictionary of Pinned Controlled State-Transition-Graphs.

		Returns:
			(float) : Fraction of attractors that remain accessible under pinning
		"""
		reached_attractors = []
		for att, pcstg in pcstg_dict.items():
			pinned_att = list(nx.attracting_components(pcstg))
			print(set(att), pinned_att)
			reached_attractors.append(set(att) in pinned_att)
		return sum(reached_attractors) / float(len(pcstg_dict))
Example #26
def condense_small_attractors(graph, min_size=3):
    """
    Condense all attractors smaller than a given size.

    Return a copy of `graph` with all attractors (scc without outflux) smaller
    than `min_size` nodes condensed.
    """
    attractors = [
        comp for comp in nx.attracting_components(graph)
        if len(comp) < min_size
    ]
    to_condense = set(it.chain.from_iterable(attractors))
    attractors += [{n} for n in graph.nodes() if n not in to_condense]
    return (nx.condensation(graph, scc=attractors)
            if len(attractors) > 0 else graph.copy())
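
A usage sketch for condense_small_attractors, again assuming the function and `import itertools as it` are in scope; the small 2-cycle is contracted into a single condensation node while every other node keeps a node of its own. The graph is made up:

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)])
H = condense_small_attractors(G, min_size=3)
print(G.number_of_nodes(), H.number_of_nodes())   # 6 5 -- the attractor {0, 1} became one node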
Example #27
	def fraction_pinned_configurations(self, pcstg_dict):
		"""Returns the Fraction of successfully Pinned Configurations
		
		Args:
			pcstg_dict (dict of networkx.DiGraph) : The dictionary of Pinned Controlled State-Transition-Graphs.
		
		Returns:
			(list) : the Fraction of successfully Pinned Configurations to each attractor
		"""
		pinned_configurations = []
		for att, pcstg in pcstg_dict.items():
			att_reached = False
			for wcc in nx.weakly_connected_components(pcstg):
				if set(att) in list(nx.attracting_components(pcstg.subgraph(wcc))):
					pinned_configurations.append(len(wcc)/ len(pcstg))
					att_reached = True
			if not att_reached:
				pinned_configurations.append(0)
		return pinned_configurations
Example #28
    def find_deletion_no_motif_attractors(self, max_stable_motifs=10000):
        """Identify motif-avoidant attractors in the deletion projection.

        Parameters
        ----------
        max_stable_motifs : int
            Maximum number of output lines for PyBoolNet to process from the
            AspSolver (the default is 10000).

        """
        if self.deletion_STG is None:
            self.build_deletion_STG(max_stable_motifs=max_stable_motifs)

        # Note: fixed points of the deletion system are fixed points of the
        # undeleted system, so we ignore these as they must contain stable motifs
        if len(list(self.deletion_STG.nodes())) > 0:
            candidates = [
                x for x in nx.attracting_components(self.deletion_STG)
                if len(x) > 1
            ]
        else:
            candidates = []
        self.deletion_no_motif_attractors = []

        # next, we see if any of these activate stable motifs
        names = sorted(self.delprimes)
        for att in candidates:
            no_motif = True
            for s in att:
                # The following check stems from the result that a stable motif
                # is active in an attractor of the original system iff its projection
                # is active in the projected attractor in the deletion-reduced system
                st = sm_format.statestring2dict(s, names)
                st.update(self.attractor_constants)
                if any(not sm_doi.fixed_excludes_implicant(st, sm)
                       for sm in self.stable_motifs):
                    no_motif = False
                    break

            if no_motif:
                self.deletion_no_motif_attractors.append(att)
Example #29
    def compute_destination(self):
        if not self._attrs :
             self._attrs = nx.attracting_components(self.stg())
        reverseSTG=self.stg().reverse(copy=True) # reverse the STG
        destiny = {}
        for id,attractor in enumerate(self._attrs): # use each attractor as a starting point
            visited=[]
            to_visit=[]
            # TODO: indentation wrong!!?? > Could be, but I would not know why.
            for node in attractor: # queue the states of the current attractor for visiting
                to_visit.append(node)

            while len(to_visit)>0: # as long as there are states left to visit
                node=to_visit[0]
                if node not in destiny: # create an entry in destiny for the current node if needed
                    destiny[node]=[]
                destiny[node].append(id) # and extend its list with the current attractor

                visited.append(node) # then mark the node as visited
                to_visit.remove(node)
                for edge in reverseSTG.edges(node): # and queue its successors (in the reversed STG)
                    if edge[1] not in visited and edge[1] not in to_visit: # if they have
                        to_visit.append(edge[1]) # not been seen yet
        return destiny
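
The "destiny" map built above (for every state, the list of attractors it can end up in) can be sketched standalone by replacing the hand-rolled traversal with nx.descendants on the reversed STG; the small non-deterministic STG is made up for illustration:

import networkx as nx

stg = nx.DiGraph([(0, 1), (0, 2), (1, 1), (2, 2)])    # state 0 may fall into either fixed point
R = stg.reverse()

destiny = {}
for aid, attractor in enumerate(nx.attracting_components(stg)):
    for node in attractor:
        for state in {node} | nx.descendants(R, node):   # every state that can reach the attractor
            destiny.setdefault(state, []).append(aid)

print(destiny)   # e.g. {1: [0], 0: [0, 1], 2: [1]}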
Example #30
def FindAttractors(Counts,folder='Data'):
    '''Identifying attractors and basins using NetworkX'''
    print 'Now identifying attractors, please wait...'
    results = {}
    TransNet = nx.DiGraph()
    for source, target in Counts:  # add source and target node to network objects
        TransNet.add_edge(source, target)
    TransNet.remove_edges_from(TransNet.selfloop_edges())
    attractors = nx.attracting_components(TransNet)  # find attractors (SCCs with no outgoing edges)
    ReTransNet = TransNet.reverse()  # reverse the directed graph to create a tree with the attractors as the roots
    try:
        os.mkdir(folder)
    except:
        pass
    print 'Now identifying basins for each attractor...'
    for attractor in attractors:
        basin_tree=nx.dfs_tree(ReTransNet,list(attractor)[0])  # the descendants of one attractor state in the reversed graph form the basin
        results[tuple(attractor)]=basin_tree.nodes()
        #results_origin[tuple(attractor)]=[leaf for leaf in basin if ReTransNet.out_degree(leaf) == 0] # record initial states of attractors
        #AttNet=TransNet.subgraph(attractor)
        #nx.write_edgelist(AttNet,'%s/Attractor%s.txt'%(folder,attractors.index(attractor)),data=False)
    print 'Writing out transition graph in %s/TransGraph.txt'%folder
    nx.write_edgelist(TransNet,'%s/TransGraph.txt'%folder,data=False)
    return results
Example #31
#### SEARCH

number_of_components = 3
size_of_component = 3
counte = 0
mmin = 1000
while True:
    G = nx.gnm_random_graph(30, 25, directed=True)
    # G = nx.erdos_renyi_graph(15,0.07,directed=True)
    # G = nx.gnp_random_graph(20,0.1,directed=True)
    # pos=nx.spring_layout(G,k=0.15,iterations=10)
    if counte == mmin:
        print counte, 'c'
        mmin += 1000
    testy = nx.attracting_components(G)
    # print testy
    uu = 0
    for g in testy:
        if len(g) >= size_of_component:
            uu += 1
    if uu >= number_of_components:
        break
    else:
        counte += 1
        continue

pos = nx.spring_layout(G, k=0.15, iterations=10)
# pos=nx.graphviz_layout(G)
# pos=layout(G)
Example #32
 def _computeAttrInfos(self) :
     if not self._attrs :
         self._attrs = nx.attracting_components(self.stg())
     for id, attr in enumerate(self._attrs) :
         self._aInfos.append(AI.AttrInfo(self._mc, id, attr))
Example #33
    trajs = generator.trajectories(num_walkers,
                                   MAX_TIME,
                                   start_nodes=start_nodes)
    trajs.to_parquet(str(out))

# %% Analyse graph structure: attractors and limit cycles
# =======================================================

network = SwitchingNetwork(graph, timescale=1, memory=False)
ccs = None
while not ccs or len(ccs[0]) == 1:
    digraph = nx.DiGraph()
    digraph.add_nodes_from(graph.nodes(data=True))
    digraph.add_edges_from(network.edges(0))

    ccs = sorted(nx.attracting_components(digraph), key=len, reverse=True)

for i in [6]:
    component = ccs[0]
    nodes = nx.ancestors(digraph, next(iter(component)))
    nodes |= component
    subgraph = digraph.subgraph(nodes)

    fig, ax = er.plot.graph.structure(graph)
    fig.set_size_inches((4, 4))
    pos = nx.get_node_attributes(graph, 'pos')
    pos = er.plot.graph.get_nodes_pos(digraph)
    nx.draw_networkx(subgraph,
                     nodelist=nodes,
                     pos=pos,
                     with_labels=False,
Example #34
 def test_number_attacting_components(self):
     assert_equal(len(nx.attracting_components(self.G1)), 3)
     assert_equal(len(nx.attracting_components(self.G2)), 1)
     assert_equal(len(nx.attracting_components(self.G3)), 2)
Example #35
def attractors(f, synch=False):
    dG = sd_graph(f) if synch else ad_graph(f)
    return attracting_components(dG)
Example #36
def correlate_node_by_sync(cells):
    global template_, avg_
    for m, n in itertools.combinations(cells.nodes(), 2):
        vec1, vec2 = cells.node[m]['timeseries'], cells.node[n]['timeseries']
        corr = sync_index(vec1, vec2)
        rcorr = sync_index(vec2, vec1)
        if corr > 0.6:
            cells.add_edge(m, n, weight=corr)
            cells.add_edge(n, m, weight=rcorr)

    outfile = 'final.png'
    plt.figure(figsize=(12, 8))
    plt.subplot(2, 2, 1)
    plt.imshow(avg_, interpolation='none', aspect='auto')
    plt.title('All frames averaged')
    plt.colorbar()  # orientation = 'horizontal' )

    syncImg = np.zeros(shape=template_.shape)
    syncDict = defaultdict(list)
    nx.write_gpickle(cells, 'cells.gpickle')
    logger.info('Logging out after writing to graph.')
    return
    try:
        nx.drawing.nx_agraph.write_dot(cells, 'all_cell.dot')
    except Exception as e:
        logger.warn('Failed to write dot file %s' % e)
    for i, c in enumerate(nx.attracting_components(cells)):
        if len(c) < 2:
            continue
        logger.info('Found attracting component of length %d' % len(c))
        for p in c:
            cv2.circle(syncImg, (p[1], p[0]), 2, (i + 1), 2)
            # syncDict[str(c)].append( cells.node[p]['timeseries'] )

    plt.subplot(2, 2, 2)
    plt.imshow(timeseries_,
               interpolation='none',
               aspect='auto',
               cmap='seismic')
    plt.colorbar()  #orientation = 'horizontal' )
    plt.title('Activity of each pixel')

    plt.subplot(2, 2, 3)
    plt.imshow(syncImg, interpolation='none', aspect='auto')
    plt.colorbar()  #orientation = 'horizontal' )

    # Here we draw the synchronization.
    plt.subplot(2, 2, 4)
    # clusters = []
    # for c in syncDict:
    # clusters += syncDict[c]
    # # Append two empty lines to separate the clusters.
    # clusters += [ np.zeros( timeseries_.shape[1] ) ]
    # try:
    # plt.imshow( np.vstack(clusters), interpolation = 'none', aspect = 'auto' )
    # plt.colorbar(  ) #orientation = 'horizontal' )
    # except Exception as e:
    # print( "Couldn't plot clusters %s" % e )
    plt.tight_layout()
    plt.savefig(outfile)
    logger.info('Saved to file %s' % outfile)
Example #37
#### SEARCH

number_of_components=3
size_of_component=3
counte=0
mmin=1000
while True:
    G = nx.gnm_random_graph(30,25,directed=True)
    # G = nx.erdos_renyi_graph(15,0.07,directed=True)
    # G = nx.gnp_random_graph(20,0.1,directed=True)
    # pos=nx.spring_layout(G,k=0.15,iterations=10)
    if counte== mmin:
        print counte,'c'
        mmin+=1000
    testy=nx.attracting_components(G)
    # print testy
    uu=0
    for g in testy:
        if len(g)>=size_of_component:
            uu+=1
    if uu>=number_of_components:
        break
    else:
        counte+=1
        continue



pos=nx.spring_layout(G,k=0.15,iterations=10)
# pos=nx.graphviz_layout(G)
Example #38
migrations = pd.read_csv("migration_2015.csv",
                         thousands=",").set_index("Unnamed: 0")

table_migrations = migrations.stack().reset_index()\
                                     .sort_values(0, ascending=False)\
                                     .groupby("Unnamed: 0").head(3)

table_migrations.columns = "From", "To", "weight"

G = nx.from_pandas_dataframe(table_migrations, "From", "To", 
                             edge_attr=["weight"],
                             create_using=nx.DiGraph())
nx.relabel_nodes(G, pd.read_csv("states.csv", header=None)\
                 .set_index(0)[2].to_dict(), copy=False)

print(sorted(nx.weakly_connected_components(G), key=len, reverse=True))
print(sorted(nx.strongly_connected_components(G), key=len, reverse=True))
attracting = sorted(nx.attracting_components(G), key=len, reverse=True)[0]
print(attracting)

pos = graphviz_layout(G)
dzcnapy.attrs["node_color"] = ["palegreen" if n in attracting 
                               else "pink" for n in G]

nx.draw_networkx_edges(G, pos, alpha=0.5, **dzcnapy.attrs)
nx.draw_networkx_nodes(G, pos, **dzcnapy.attrs)
nx.draw_networkx_labels(G, pos, **dzcnapy.attrs)

dzcnapy.set_extent(pos, plt)
dzcnapy.plot("migration", True)
Example #39
#list(nx.make_max_clique_graph(G))
#list(nx.make_clique_bipartite(G))
#nx.graph_clique_number(G)
#nx.graph_number_of_cliques(G)

# components
nx.is_strongly_connected(G)
nx.number_strongly_connected_components(G)
scc = nx.strongly_connected_components(G)
nx.strongly_connected_components_recursive(G)
nx.condensation(G, scc)

# attracting components
nx.is_attracting_component(G)
nx.number_attracting_components(G)
nx.attracting_components(G)

# directed acyclic graphs
nx.is_directed_acyclic_graph(G)
nx.is_aperiodic(G)

# distance measure  (all for connected graph)
nx.center(Gcc)
nx.diameter(Gcc)
nx.eccentricity(Gcc) 
nx.periphery(Gcc)
nx.radius(Gcc)

# flows (seg fault currently)
#nx.max_flow(Gcc, 1, 2)
#nx.min_cut(G, 1, 2)
Example #40
def basal_component_sizes(graph):
    """basal_component_sizes"""
    return [len(i) for i in nx.attracting_components(nx.reverse(graph))]
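
A quick check of the reversal trick used here, on a made-up graph: the "basal" (source) components of a graph are exactly the attracting components of its reverse.

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (3, 2)])
print([len(c) for c in nx.attracting_components(G)])               # [1]    -- the sink {2}
print([len(c) for c in nx.attracting_components(nx.reverse(G))])   # [1, 1] -- the sources {0} and {3}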
Example #41
def attracting_component_sizes(graph):
    """attracting_component_sizes"""
    return [len(i) for i in nx.attracting_components(graph)]
Example #42
 def test_number_attacting_components(self):
     assert_equal(len(nx.attracting_components(self.G1)), 3)
     assert_equal(len(nx.attracting_components(self.G2)), 1)
     assert_equal(len(nx.attracting_components(self.G3)), 2)
Example #43
#list(nx.make_max_clique_graph(G))
#list(nx.make_clique_bipartite(G))
#nx.graph_clique_number(G)
#nx.graph_number_of_cliques(G)

# components
nx.is_strongly_connected(G)
nx.number_strongly_connected_components(G)
scc = nx.strongly_connected_components(G)
nx.strongly_connected_components_recursive(G)
nx.condensation(G, scc)

# attracting components
nx.is_attracting_component(G)
nx.number_attracting_components(G)
nx.attracting_components(G)

# directed acyclic graphs
nx.is_directed_acyclic_graph(G)
nx.is_aperiodic(G)

# distance measure  (all for connected graph)
nx.center(Gcc)
nx.diameter(Gcc)
nx.eccentricity(Gcc)
nx.periphery(Gcc)
nx.radius(Gcc)

# flows (seg fault currently)
#nx.max_flow(Gcc, 1, 2)
#nx.min_cut(G, 1, 2)
Example #44
    def compute_features(self):

        self.add_feature(
            "is_connected",
            lambda graph: nx.is_connected(graph) * 1,
            "Whether the graph is connected or not",
            InterpretabilityScore(5),
        )

        self.add_feature(
            "num_connected_components",
            lambda graph: len(list(nx.connected_components(graph))),
            "The number of connected components",
            InterpretabilityScore(5),
        )

        @lru_cache(maxsize=None)
        def eval_connectedcomponents(graph):
            """this evaluates the main function and cach it for speed up."""
            return list(nx.connected_components(graph))

        self.add_feature(
            "largest_connected_component",
            lambda graph: len(eval_connectedcomponents(graph)[0]),
            "The size of the largest connected component",
            InterpretabilityScore(4),
        )

        def ratio_largest(graph):
            if len(eval_connectedcomponents(graph)) == 1:
                return 0
            return len(eval_connectedcomponents(graph)[0]) / len(
                eval_connectedcomponents(graph)[1]
            )

        self.add_feature(
            "ratio_largest_connected_components",
            ratio_largest,
            "The size ratio of the two largest connected components",
            InterpretabilityScore(4),
        )

        def ratio_min_max(graph):
            if len(eval_connectedcomponents(graph)) == 1:
                return 0
            return len(eval_connectedcomponents(graph)[0]) / len(
                eval_connectedcomponents(graph)[-1]
            )

        self.add_feature(
            "ratio_maxmin_connected_components",
            ratio_min_max,
            "The size ratio of the max and min largest connected components",
            InterpretabilityScore(4),
        )

        self.add_feature(
            "number_strongly_connected_components",
            lambda graph: nx.number_strongly_connected_components(graph),
            "A strongly connected component is a set of nodes in a directed graph such \
            that each node in the set is reachable from any other node in that set",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "strongly_connected_component_sizes",
            lambda graph: [len(i) for i in nx.strongly_connected_components(graph)],
            "the distribution of strongly connected component sizes",
            InterpretabilityScore(3),
            statistics="centrality",
        )

        self.add_feature(
            "condensation_nodes",
            lambda graph: nx.condensation(graph).number_of_nodes(),
            "number of nodes in the condensation of the graph",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "condensation_edges",
            lambda graph: nx.condensation(graph).number_of_edges(),
            "number of edges in the condensation of the graph",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "number_weakly_connected_components",
            lambda graph: nx.number_weakly_connected_components(graph),
            "A weakly connected component is a set of nodes in a directed graph such that \
            there exists as edge between each node and at least one other node in the set",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "weakly_connected_component_sizes",
            lambda graph: [len(i) for i in nx.weakly_connected_components(graph)],
            "the distribution of weakly connected component sizes",
            InterpretabilityScore(3),
            statistics="centrality",
        )

        self.add_feature(
            "number_attracting_components",
            lambda graph: nx.number_attracting_components(graph),
            "An attracting component is a set of nodes in a directed graph such that that \
            once in that set, all other nodes outside that set are not reachable",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "attracting_component_sizes",
            lambda graph: [len(i) for i in nx.attracting_components(graph)],
            "the distribution of attracting component sizes",
            InterpretabilityScore(3),
            statistics="centrality",
        )

        self.add_feature(
            "number basal_components",
            lambda graph: nx.number_attracting_components(nx.reverse(graph)),
            "An basal component is a set of nodes in a directed graph such that there are no \
            edges pointing into that set",
            InterpretabilityScore(3),
        )

        self.add_feature(
            "basal_component_sizes",
            lambda graph: [len(i) for i in nx.attracting_components(nx.reverse(graph))],
            "the distribution of basal component sizes",
            InterpretabilityScore(3),
            statistics="centrality",
        )
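
As a sanity check for the directed-graph features described above, the underlying NetworkX calls on a toy DiGraph (the feature framework itself is omitted; the graph is made up):

import networkx as nx

G = nx.DiGraph([(3, 0), (0, 1), (1, 2), (2, 1)])        # 3 -> 0 -> {1 <-> 2}
print(nx.number_strongly_connected_components(G))       # 3
print(nx.condensation(G).number_of_nodes())             # 3
print(nx.number_attracting_components(G))               # 1 -- the sink cycle {1, 2}
print(nx.number_attracting_components(nx.reverse(G)))   # 1 -- the basal node {3}
print([len(c) for c in nx.attracting_components(G)])    # [2]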
Example #45
def connected_component_layout(g: nx.DiGraph):
    """
    lay out a graph with a single connected component,
    returns dictionary of positions and width/height of bounding box
    """

    # get attractor (fixed point or cycle)
    attractor_set = next(nx.attracting_components(g))
    cycle_len = len(attractor_set)

    # no guarantee the attractor set is in the proper order:
    base_point = next(iter(attractor_set))
    cycle = [base_point]
    # in python 3.8+ you have assignment expressions:
    # while (next_point := list(g.successors(cycle[-1]))[0]) != base_point:
    #    cycle.append(next_point)
    next_point = list(g.successors(cycle[-1]))[0]
    while next_point != base_point:
        cycle.append(next_point)
        next_point = list(g.successors(cycle[-1]))[0]

    pos = dict()

    visited_set = set()

    def get_num_leaves(parent):
        if parent in visited_set:
            return 0
        else:
            visited_set.add(parent)
        predecessors = [
            predecessor for predecessor in g.predecessors(parent)
            if predecessor != parent
        ]
        if len(predecessors) == 0:
            return 1
        else:
            return sum(
                get_num_leaves(predecessor) for predecessor in predecessors)

    def recurse_layout(successor, radius: float, max_theta: float,
                       min_theta: float):
        predecessors = [
            predecessor for predecessor in g.predecessors(successor)
            if predecessor != successor and predecessor not in pos
        ]
        if len(predecessors) == 0:
            return

        angles_recur = np.cumsum(
            np.array(
                [0.0] +
                [get_num_leaves(predecessor) for predecessor in predecessors],
                dtype=np.float64,
            ))
        angles_recur *= ((max_theta - min_theta) /
                         angles_recur[-1] if angles_recur[-1] != 0 else
                         (max_theta - min_theta))
        angles_recur += min_theta
        for m, predecessor in enumerate(predecessors):
            theta_n = (angles_recur[m + 1] + angles_recur[m]) / 2.0
            pos[predecessor] = radius * np.array(
                [np.cos(theta_n), np.sin(theta_n)])
            recurse_layout(predecessor, radius + 20, angles_recur[m + 1],
                           angles_recur[m])

    # lay out the cycle:
    if cycle_len == 1:
        pos[base_point] = np.array([0.0, 0.0])
        recurse_layout(base_point, 20, 2 * np.pi, 0)
    else:
        angles = np.cumsum(
            np.array([0] + [get_num_leaves(point) for point in cycle],
                     dtype=np.float64))
        angles *= 2 * np.pi / angles[-1] if angles[-1] != 0 else 2 * np.pi
        for n, point in enumerate(cycle):
            theta = (angles[n + 1] + angles[n]) / 2.0
            pos[point] = 20 * np.array([np.cos(theta), np.sin(theta)])
            recurse_layout(point, 20, angles[n + 1], angles[n])

    # shift all positions so the corner of the bounding box sits at the origin
    pos_array = np.array(list(pos.values()))
    offset = np.min(pos_array, axis=0)
    pos = {node: pt - offset for node, pt in pos.items()}
    return pos, np.max(pos_array, axis=0) - offset
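
A usage sketch for connected_component_layout, assuming the function and its numpy/networkx imports are in scope; the component below is made up, with a 3-cycle attractor fed by a small tree:

import networkx as nx

g = nx.DiGraph([(0, 1), (1, 2), (2, 0), (3, 0), (4, 3), (5, 3)])
pos, (width, height) = connected_component_layout(g)
print(len(pos), width >= 0, height >= 0)   # 6 True True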