Example #1
def get_network_property(graph):
    """Returns various property of the graph.

    It calculates the richness coefficient, triangles and transitivity
    coefficient. To do so, it removes self-loops *in-place*. So, there
    is a possibility that the graph passed as parameter has been
    changed.
    """

    remove_self_loop(graph)

    # If the graph has fewer than three nodes, there is
    # no point in calculating these properties.
    if len(graph.nodes()) < 3:
        return ({0: 0.0}, 0, 0)

    try:
        richness = nx.rich_club_coefficient(graph)
    except nx.NetworkXAlgorithmError:
        # NetworkXAlgorithmError is raised when the
        # algorithm fails to achieve the desired number
        # of swaps within the maximum number of attempts.
        # It happened for a really small graph, but guard
        # against those cases by falling back to the
        # unnormalized coefficient.
        richness = nx.rich_club_coefficient(graph, normalized=False)

    triangle = nx.triangles(graph)
    transitivity = nx.transitivity(graph)

    return (richness, triangle, transitivity)
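A minimal usage sketch for the function above. The snippet relies on an external remove_self_loop helper, so a stand-in built on the public NetworkX API is assumed here:

import networkx as nx

def remove_self_loop(graph):
    # Assumed stand-in for the external helper: drop self-loops in-place.
    graph.remove_edges_from(list(nx.selfloop_edges(graph)))

G = nx.karate_club_graph()
richness, triangle, transitivity = get_network_property(G)
print(transitivity)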
Example #2
def rich_club(G, R_list=None, n=10):
    '''
    This calculates the rich club coefficient for each degree
    value in the graph (G).

    Inputs:
        G ------ networkx graph
        R_list - list of random graphs with matched degree distribution
                   if R_list is None then a random graph is calculated
                   within the code
                   if len(R_list) is less than n then the remaining random graphs
                   are calculated within the code
                 Default R_list = None
        n ------ number of random graphs for which to calculate rich club
                   coefficients
                 Default n = 10

    Returns:
        rc ------ dictionary of rich club coefficients for the real graph
        rc_rand - array of rich club coefficients for the n random graphs
    '''
    # Import the modules you'll need
    import networkx as nx
    import numpy as np

    # First, calculate the rich club coefficient for the regular graph
    rc_dict = nx.rich_club_coefficient(G, normalized=False)

    # Save the degrees as a numpy array
    deg = np.array(list(rc_dict.keys()))  # list() so Python 3 dict views become arrays

    # Save the rich club coefficients as a numpy array
    rc = np.array(list(rc_dict.values()))

    # Calculate n different random graphs and their
    # rich club coefficients

    # Start by creating an empty array that will hold
    # the n random graphs' rich club coefficients
    rc_rand = np.ones([len(rc), n])

    for i in range(n):
        # If you haven't already calculated random graphs
        # or you haven't given this function as many random
        # graphs as it is expecting then calculate a random
        # graph here
        if not R_list or len(R_list) <= i:
            R = random_graph(G)  # random_graph: external degree-matched randomizer
        # Otherwise just use the one you already made
        else:
            R = R_list[i]

        # Calculate the rich club coefficient
        rc_rand_dict = nx.rich_club_coefficient(R, normalized=False)

        # And save the values to the numpy array you created earlier
        rc_rand[:, i] = list(rc_rand_dict.values())

    return deg, rc, rc_rand
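A minimal usage sketch, with degree-preserving rewiring (nx.double_edge_swap) standing in for the external random_graph helper; keeping the degree sequence fixed guarantees each random graph produces a coefficient dictionary of the same length, so the columns of rc_rand line up:

import networkx as nx

G = nx.karate_club_graph()
R_list = []
for seed in range(10):
    R = G.copy()
    # rewire while preserving every node's degree
    nx.double_edge_swap(R, nswap=2 * G.number_of_edges(), max_tries=10 ** 5, seed=seed)
    R_list.append(R)

deg, rc, rc_rand = rich_club(G, R_list=R_list, n=10)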
Example #4
def test_richclub4():
    G = nx.Graph()
    G.add_edges_from(
        [(0, 1), (0, 2), (0, 3), (0, 4), (4, 5), (5, 9), (6, 9), (7, 9), (8, 9)]
    )
    rc = nx.rich_club_coefficient(G, normalized=False)
    # For k = 0 all 10 nodes qualify and there are 9 edges among them,
    # so phi(0) = 2*9 / (10*9) = 18/90; for k = 1 only nodes 0, 4, 5, 9
    # qualify, with 3 edges among them, giving 6/12.
    assert rc == {0: 18 / 90.0, 1: 6 / 12.0, 2: 0.0, 3: 0.0}
Example #5
def rich_club_coefficient(self):
    """
    Rich-club coefficient
    Returns: A dictionary, keyed by degree, with rich-club coefficient values
    """
    undirected_g = self.G.to_undirected()
    return nx.rich_club_coefficient(undirected_g, normalized=False)
Example #6
    def rich_club(self, force=False):
        '''
        Calculate the rich club coefficient of G for each degree between 0 and
        ``max([degree(v) for v in G.nodes])``. The resulting dictionary of rich
        club coefficients can be accessed and manipulated as
        ``G.graph['rich_club']``

        Parameters
        ----------
        force : bool
            pass True to recalculate when a dictionary of rich club
            coefficients already exists

        Returns
        -------
        dict
            a dictionary mapping integer `x` to the rich club coefficient of
            G for degree `x`

        See Also
        --------
        :func:`rich_club`
        '''
        if ('rich_club' not in self.graph) or force:
            self.graph['rich_club'] = nx.rich_club_coefficient(
                self, normalized=False)
        return self.graph['rich_club']
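A short usage sketch of the caching pattern, assuming the method above lives on an nx.Graph subclass (the See Also sections on this page call it BrainNetwork):

import networkx as nx

class BrainNetwork(nx.Graph):
    def rich_club(self, force=False):
        # cache the dictionary in the graph attributes, as above
        if ('rich_club' not in self.graph) or force:
            self.graph['rich_club'] = nx.rich_club_coefficient(
                self, normalized=False)
        return self.graph['rich_club']

G = BrainNetwork(nx.karate_club_graph())
first = G.rich_club()     # computed once and stored in G.graph['rich_club']
second = G.rich_club()    # served from the cache
assert first is second
G.rich_club(force=True)   # force a recalculation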
Example #7
def myrichclubcoefficient(G):
    dd = nx.rich_club_coefficient(G, normalized=False)
    d = list(dd.values())  # list() so numpy sees a plain sequence
    avgrich = np.average(d)
    stdrich = np.std(d)
    fatrich = fatness(d)  # fatness() is an external helper
    return [avgrich, stdrich, fatrich]
Example #8
def networkxReport(networkxObj, reportFileName=None):
    import collections
    import json
    import networkx
    report = {}

    report['density'] = networkx.density(networkxObj)
    report['degDist'] = {
        'description': "degree distribution",
        'value': collections.Counter([d for n, d in networkxObj.degree()])
    }
    report['RCC'] = {
        'description': "For each degree k, the rich-club coefficient is the ratio of the number of actual to the number of potential edges for nodes with degree greater than k",
        'value': networkx.rich_club_coefficient(networkx.Graph(networkxObj), normalized=True)
    }
    report['s-Metric'] = {
        'description': "The s-metric is defined as the sum of the products deg(u)*deg(v) for every edge (u,v) in G",
        'value': networkx.s_metric(networkxObj, normalized=False)
    }
    report['degree-centrality'] = {
        'description': "The degree centrality for a node v is the fraction of nodes it is connected to",
        'value': networkx.degree_centrality(networkxObj)
    }

    if reportFileName:
        with open(reportFileName, "w") as f:
            f.write(json.dumps(report))
    
    return report
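A brief usage sketch (the report.json filename is illustrative; note that s_metric with normalized=False requires a NetworkX version that still accepts that argument):

import networkx

G = networkx.karate_club_graph()
report = networkxReport(G, reportFileName="report.json")
print(report['density'])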
Example #9
def rich_club_coefficient(G, bins=None, normalized=True, Q=1000, weight='weight'):
    import random
    for e in G.edges():
        G[e[0]][e[1]][weight] = float(G[e[0]][e[1]][weight])
    if not (G.is_multigraph() or G.is_directed()):
        return nx.rich_club_coefficient(G, normalized=normalized, Q=Q)

    if bins is None:
        bins = np.logspace(0, 4.6, num=30)  # weight
        #bins = np.linspace(0,218,num=30) # count
        #bins = sorted(G.degree(weight='weight').values())[:-1]

    # _phi_w is an external helper computing the weighted rich-club fraction
    rc = [_phi_w(G, x, w=weight) for x in bins]

    if normalized:
        # note: nodes_iter() and R.edge[] are the NetworkX 1.x API
        R = G.copy()
        rcs = []
        for i in range(Q):
            for n in R.nodes_iter():
                keys = list(R.edge[n].keys())
                vals = [R.edge[n][x] for x in keys]
                random.shuffle(vals)
                for k, v in zip(keys, vals):
                    R.edge[n][k] = v
            rcs.append([_phi_w(R, x, w=weight) for x in bins])
        rc_n = np.zeros(len(rcs[0]))
        for i in range(len(rc_n)):
            for j in range(len(rcs)):
                rc_n[i] += rcs[j][i]
            rc_n[i] = rc_n[i] / Q
        rc = [x / y for x, y in zip(rc, rc_n)]
    return rc, bins
Example #10
def test_richclub4():
    G = nx.Graph()
    G.add_edges_from([(0,1),(0,2),(0,3),(0,4),(4,5),(5,9),(6,9),(7,9),(8,9)])
    rc = nx.rich_club_coefficient(G,normalized=False)
    assert_equal(rc,{0:18/90.0,
                     1:6/12.0,
                     2:0.0,
                     3:0.0})
Example #11
def saveRichClubDistribution(G, N, limitDistance):
    rc = nx.rich_club_coefficient(G, normalized=True, Q=500)
    plt.plot(list(rc.keys()), list(rc.values()))  # list() for Python 3 dict views
    pdfName = "richclubModel" + "N" + str(
        N) + "limit" + str(limitDistance) + "rand" + str(
            random.randrange(1, 100)) + "_rich-club" + ".pdf"
    plt.savefig(pdfName, format='pdf')
    plt.close()
Example #12
def compute_dict_measures(ntwk):
    """
    Returns a dictionary
    """
    iflogger.info('Computing measures which return a dictionary:')
    measures = {}
    iflogger.info('...Computing rich club coefficient...')
    measures['rich_club_coef'] = nx.rich_club_coefficient(ntwk)
    return measures
Example #13
def plotRichClub(G):
    try:
        rc = nx.rich_club_coefficient(G, normalized=True, Q=500)
        plt.plot(list(rc.keys()), list(rc.values()))
        # plt.show()
        pdfName = "rich-club.pdf"
        plt.savefig(pdfName, format='pdf')
        plt.close()
    except nx.NetworkXAlgorithmError:
        # normalization can fail to reach the requested number of edge swaps; retry
        print("new try")
        plotRichClub(G)
Example #14
def rich_cl(Y):
    rich_club = []
    for one in Y:
        G = nx.from_numpy_array(one)
        rcv = nx.rich_club_coefficient(G, normalized=False)
        # pad degrees missing from the rich-club dict with zeros so every
        # matrix yields a vector of length 68 (the expected node count)
        a = set(rcv.keys())
        b = set(range(68))
        add_keys = b.difference(a)
        for idx in add_keys:
            rcv[idx] = 0
        rich_club += [np.array(list(rcv.values()))]
    return np.array(rich_club)
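A small usage sketch, assuming Y is an iterable of 68x68 adjacency matrices (the range(68) padding above implies 68 nodes, e.g. cortical regions); random graphs serve as stand-in data:

import numpy as np
import networkx as nx

Y = [nx.to_numpy_array(nx.gnp_random_graph(68, 0.1, seed=s)) for s in range(3)]
features = rich_cl(Y)
print(features.shape)  # (3, 68): one zero-padded coefficient vector per matrix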
Example #15
def saveRichClub(G):
    try:
        rc = nx.rich_club_coefficient(G, normalized=True, Q=500)
        plt.plot(list(rc.keys()), list(rc.values()))
        pdfName = "hyperbolicModel" + "N" + str(
            N) + "limit" + str(limitDistance) + "rand" + str(
                random.randrange(1, 100)) + "_rich-club" + ".pdf"
        plt.savefig(pdfName, format='pdf')
        plt.close()
    except nx.NetworkXAlgorithmError:
        # normalization can fail to reach the requested number of edge swaps; retry
        print("new try")
        saveRichClub(G)
Example #16
def rich_club(self, normalized=True):
    """
    Computes the rich-club coefficient of the network.
    """
    Gcc = sorted(nx.connected_components(self.G), key=len, reverse=True)
    G0 = self.G.subgraph(Gcc[0])
    if normalized:
        rich_club_coef = nx.rich_club_coefficient(G0,
                                                  normalized=normalized)
        title = 'Rich Club Normalized'
    else:
        rich_club_coef = nx.rich_club_coefficient(self.G,
                                                  normalized=normalized)
        title = 'Rich Club NonNormalized'
    rc_array = np.zeros(len(rich_club_coef))
    for i in np.arange(len(rich_club_coef)):
        rc_array[i] = rich_club_coef[i]
    plt.figure()
    plt.plot(rc_array)
    plt.title(title)
    plt.savefig(self.path + title)
    plt.close()
    return rc_array
Example #17
def test_richclub3():
    # tests an edge case
    G = nx.karate_club_graph()
    rc = nx.rich_club_coefficient(G,normalized=False)
    assert_equal(rc,{0:156.0/1122,
                     1:154.0/1056,
                     2:110.0/462,
                     3:78.0/240,
                     4:44.0/90,
                     5:22.0/42,
                     6:10.0/20,
                     7:10.0/20,
                     8:10.0/20,
                     9:6.0/12,
                     10:2.0/6,
                     11:2.0/6,
                     12:0.0,
                     13:0.0,
                     14:0.0,
                     15:0.0,})
Example #18
def rich_club(G):
    '''
    Calculate the rich club coefficient of G for each degree between 0 and
    ``max([degree(v) for v in G.nodes])``.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        a binary graph

    Returns
    -------
    dict
        a dictionary mapping integer ``x`` to the rich club coefficient of G
        for degree ``x``

    See Also
    --------
    :func:`BrainNetwork.rich_club`
    '''
    return nx.rich_club_coefficient(G, normalized=False)
Example #19
def rich_club_coefficient(G,
                          bins=None,
                          normalized=True,
                          Q=1000,
                          weight='weight'):
    import random
    for e in G.edges():
        G[e[0]][e[1]][weight] = float(G[e[0]][e[1]][weight])
    if not (G.is_multigraph() or G.is_directed()):
        return nx.rich_club_coefficient(G, normalized=normalized, Q=Q)

    if bins is None:
        bins = np.logspace(0, 4.6, num=30)  # weight
        #bins = np.linspace(0,218,num=30) # count
        #bins = sorted(G.degree(weight='weight').values())[:-1]

    # _phi_w is an external helper computing the weighted rich-club fraction
    rc = [_phi_w(G, x, w=weight) for x in bins]

    if normalized:
        # note: nodes_iter() and R.edge[] are the NetworkX 1.x API
        R = G.copy()
        rcs = []
        for i in range(Q):
            for n in R.nodes_iter():
                keys = list(R.edge[n].keys())
                vals = [R.edge[n][x] for x in keys]
                random.shuffle(vals)
                for k, v in zip(keys, vals):
                    R.edge[n][k] = v
            rcs.append([_phi_w(R, x, w=weight) for x in bins])
        rc_n = np.zeros(len(rcs[0]))
        for i in range(len(rc_n)):
            for j in range(len(rcs)):
                rc_n[i] += rcs[j][i]
            rc_n[i] = rc_n[i] / Q
        rc = [x / y for x, y in zip(rc, rc_n)]
    return rc, bins
Example #20
def sim_runner(wgf):
    wg = wgf

    import pyNN.neuron as sim
    nproc = sim.num_processes()
    node = sim.rank()
    print(nproc)
    import matplotlib
    matplotlib.use('Agg')

    import matplotlib.pyplot as plt
    import matplotlib as mpl
    mpl.rcParams.update({'font.size':16})

    #import mpi4py
    #threads  = sim.rank()
    threads = 1
    rngseed  = 98765
    parallel_safe = False
    #extra = {'threads' : threads}
    import os
    import pandas as pd
    import sys
    import numpy as np
    from pyNN.neuron import STDPMechanism
    import copy
    from pyNN.random import RandomDistribution, NumpyRNG
    import pyNN.neuron as neuron
    from pyNN.neuron import h
    from pyNN.neuron import StandardCellType, ParameterSpace
    from pyNN.random import RandomDistribution, NumpyRNG
    from pyNN.neuron import STDPMechanism, SpikePairRule, AdditiveWeightDependence, FromListConnector, TsodyksMarkramSynapse
    from pyNN.neuron import Projection, OneToOneConnector
    from numpy import arange
    import pyNN
    from pyNN.utility import get_simulator, init_logging, normalized_filename
    import random
    import socket
    #from neuronunit.optimization import get_neab
    import networkx as nx
    sim = pyNN.neuron

    # Get some hippocampus connectivity data, based on a conversation with
    # academic researchers on GitHub:
    # https://github.com/Hippocampome-Org/GraphTheory/issues?q=is%3Aissue+is%3Aclosed
    # Scrape Hippocampome connectivity data, intended for programming neuromorphic hardware.
    # Conditionally download the files if they don't exist.


    path_xl = '_hybrid_connectivity_matrix_20171103_092033.xlsx'
    if not os.path.exists(path_xl):
        os.system('wget https://github.com/Hippocampome-Org/GraphTheory/files/1657258/_hybrid_connectivity_matrix_20171103_092033.xlsx')

    xl = pd.ExcelFile(path_xl)
    dfEE = xl.parse()
    dfEE.loc[0].keys()
    dfm = dfEE.values  # .as_matrix() was removed in pandas 1.0

    rcls = dfm[:,:1] # real cell labels.
    rcls = rcls[1:]
    rcls = { k:v for k,v in enumerate(rcls) } # real cell labels, cast to dictionary
    import pickle
    with open('cell_names.p','wb') as f:
        pickle.dump(rcls,f)
    import pandas as pd
    pd.DataFrame(rcls).to_csv('cell_names.csv', index=False)

    filtered = dfm[:,3:]
    filtered = filtered[1:]
    rng = NumpyRNG(seed=64754)
    delay_distr = RandomDistribution('normal', [2, 1e-1], rng=rng)
    weight_distr = RandomDistribution('normal', [45, 1e-1], rng=rng)


    sanity_e = []
    sanity_i = []

    EElist = []
    IIlist = []
    EIlist = []
    IElist = []

    for i,j in enumerate(filtered):
      for k,xaxis in enumerate(j):
        if xaxis == 1 or xaxis == 2:
          source = i
          sanity_e.append(i)
          target = k

        if xaxis ==-1 or xaxis == -2:
          sanity_i.append(i)
          source = i
          target = k

    index_exc = list(set(sanity_e))
    index_inh = list(set(sanity_i))
    import pickle
    with open('cell_indexs.p','wb') as f:
        returned_list = [index_exc, index_inh]
        pickle.dump(returned_list,f)

    import numpy
    a = numpy.asarray(index_exc)
    numpy.savetxt('pickles/' + str(k) + 'excitatory_number_labels.csv', a, delimiter=",")
    a = numpy.asarray(index_inh)
    numpy.savetxt('pickles/' + str(k) + 'inhibitory_number_labels.csv', a, delimiter=",")

    for i,j in enumerate(filtered):
      for k,xaxis in enumerate(j):
        if xaxis==1 or xaxis == 2:
          source = i
          sanity_e.append(i)
          target = k
          delay = delay_distr.next()
          weight = 1.0
          if target in index_inh:
             EIlist.append((source,target,delay,weight))
          else:
             EElist.append((source,target,delay,weight))

        if xaxis==-1 or xaxis == -2:
          sanity_i.append(i)

          source = i
          target = k
          delay = delay_distr.next()
          weight = 1.0
          if target in index_exc:
              IElist.append((source,target,delay,weight))
          else:
              IIlist.append((source,target,delay,weight))


    internal_conn_ee = sim.FromListConnector(EElist)
    ee = internal_conn_ee.conn_list

    ee_srcs = ee[:,0]
    ee_tgs = ee[:,1]

    internal_conn_ie = sim.FromListConnector(IElist)
    ie = internal_conn_ie.conn_list
    ie_srcs = set([ int(e[0]) for e in ie ])
    ie_tgs = set([ int(e[1]) for e in ie ])

    internal_conn_ei = sim.FromListConnector(EIlist)
    ei = internal_conn_ei.conn_list
    ei_srcs = set([ int(e[0]) for e in ei ])
    ei_tgs = set([ int(e[1]) for e in ei ])

    internal_conn_ii = sim.FromListConnector(IIlist)
    ii = internal_conn_ii.conn_list
    ii_srcs = set([ int(e[0]) for e in ii ])
    ii_tgs = set([ int(e[1]) for e in ii ])

    for e in internal_conn_ee.conn_list:
        assert e[0] in ee_srcs
        assert e[1] in ee_tgs

    for i in internal_conn_ii.conn_list:
        assert i[0] in ii_srcs
        assert i[1] in ii_tgs


    ml = len(filtered[1])+1
    pre_exc = []
    post_exc = []
    pre_inh = []
    post_inh = []


    rng = NumpyRNG(seed=64754)
    delay_distr = RandomDistribution('normal', [2, 1e-1], rng=rng)

    plot_EE = np.zeros(shape=(ml,ml), dtype=bool)
    plot_II = np.zeros(shape=(ml,ml), dtype=bool)
    plot_EI = np.zeros(shape=(ml,ml), dtype=bool)
    plot_IE = np.zeros(shape=(ml,ml), dtype=bool)

    for i in EElist:
        plot_EE[i[0],i[1]] = int(0)
        #plot_ss[i[0],i[1]] = int(1)

        if i[0]!=i[1]: # exclude self connections
            plot_EE[i[0],i[1]] = int(1)

            pre_exc.append(i[0])
            post_exc.append(i[1])



    assert len(pre_exc) == len(post_exc)
    for i in IIlist:
        plot_II[i[0],i[1]] = int(0)
        if i[0]!=i[1]:
            plot_II[i[0],i[1]] = int(1)
            pre_inh.append(i[0])
            post_inh.append(i[1])

    for i in IElist:
        plot_IE[i[0],i[1]] = int(0)
        if i[0]!=i[1]: # exclude self connections
            plot_IE[i[0],i[1]] = int(1)
            pre_inh.append(i[0])
            post_inh.append(i[1])

    for i in EIlist:
        plot_EI[i[0],i[1]] = int(0)
        if i[0]!=i[1]:
            plot_EI[i[0],i[1]] = int(1)
            pre_exc.append(i[0])
            post_exc.append(i[1])

    plot_excit = plot_EI + plot_EE
    plot_inhib = plot_IE + plot_II

    assert len(pre_inh) == len(post_inh)

    num_exc = [ i for i,e in enumerate(plot_excit) if sum(e) > 0 ]
    num_inh = [ y for y,i in enumerate(plot_inhib) if sum(i) > 0 ]

    # the network is dominated by inhibitory neurons, which is unusual for modellers.
    assert len(num_inh) > len(num_exc)
    assert np.sum(plot_inhib) > np.sum(plot_excit)
    assert len(num_exc) < ml
    assert len(num_inh) < ml
    # # Plot all the Projection pairs as a connection matrix (Excitatory and Inhibitory Connections)

    import pickle
    with open('graph_inhib.p','wb') as f:
       pickle.dump(plot_inhib,f, protocol=2)


    import pickle
    with open('graph_excit.p','wb') as f:
       pickle.dump(plot_excit,f, protocol=2)


    #with open('cell_names.p','wb') as f:
    #    pickle.dump(rcls,f)
    import pandas as pd
    pd.DataFrame(plot_EE).to_csv('ee.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_IE).to_csv('ie.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_II).to_csv('ii.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_EI).to_csv('ei.csv', index=False)


    from scipy.sparse import coo_matrix
    m = np.matrix(filtered[1:])

    bool_matrix = np.add(plot_excit,plot_inhib)
    with open('bool_matrix.p','wb') as f:
       pickle.dump(bool_matrix,f, protocol=2)

    if not isinstance(m, coo_matrix):
        m = coo_matrix(m)

    Gexc_ud = nx.Graph(plot_excit)
    avg_clustering = nx.average_clustering(Gexc_ud)

    rc = nx.rich_club_coefficient(Gexc_ud, normalized=False)
    print('Rich-club coefficient at degree 0:', rc[0])
    gexc = nx.DiGraph(plot_excit)

    gexcc = nx.betweenness_centrality(gexc)
    top_exc = sorted(([ (v,k) for k, v in dict(gexcc).items() ]), reverse=True)

    in_degree = dict(gexc.in_degree())
    top_in = sorted(([(v, k) for k, v in in_degree.items()]))
    in_hub = top_in[-1][1]
    out_degree = dict(gexc.out_degree())
    top_out = sorted(([(v, k) for k, v in out_degree.items()]))
    out_hub = top_out[-1][1]
    mean_out = np.mean(list(out_degree.values()))
    mean_in = np.mean(list(in_degree.values()))

    mean_conns = int((mean_in + mean_out) / 2)  # average of mean in- and out-degree

    k = 2 # number of neighbouring nodes to wire.
    p = 0.25 # probability of instead wiring to a random long range destination.
    ne = len(plot_excit)# size of small world network
    small_world_ring_excit = nx.watts_strogatz_graph(ne,mean_conns,0.25)



    k = 2 # number of neighbouring nodes to wire.
    p = 0.25 # probability of instead wiring to a random long range destination.
    ni = len(plot_inhib)# size of small world network
    small_world_ring_inhib   = nx.watts_strogatz_graph(ni,mean_conns,0.25)


    nproc = sim.num_processes()
    nproc = 8
    host_name = socket.gethostname()
    node_id = sim.setup(timestep=0.01, min_delay=1.0)#, **extra)
    print("Host #%d is on %s" % (node_id + 1, host_name))
    rng = NumpyRNG(seed=64754)

    #pop_size = len(num_exc)+len(num_inh)
    #num_exc = [ i for i,e in enumerate(plot_excit) if sum(e) > 0 ]
    #num_inh = [ y for y,i in enumerate(plot_inhib) if sum(i) > 0 ]
    #pop_exc =  sim.Population(len(num_exc), sim.Izhikevich(a=0.02, b=0.2, c=-65, d=8, i_offset=0))
    #pop_inh = sim.Population(len(num_inh), sim.Izhikevich(a=0.02, b=0.25, c=-65, d=2, i_offset=0))


    #index_exc = list(set(sanity_e))
    #index_inh = list(set(sanity_i))
    all_cells = sim.Population(len(index_exc)+len(index_inh), sim.Izhikevich(a=0.02, b=0.2, c=-65, d=8, i_offset=0))
    #all_cells = None
    #all_cells = pop_exc + pop_inh
    pop_exc = sim.PopulationView(all_cells,index_exc)
    pop_inh = sim.PopulationView(all_cells,index_inh)
    #print(pop_exc)
    #print(dir(pop_exc))
    for pe in pop_exc:
        print(pe)
        #import pdb
        pe = all_cells[pe]
        #pdb.set_trace()
        #pe = all_cells[i]
        r = random.uniform(0.0, 1.0)
        pe.set_parameters(a=0.02, b=0.2, c=-65+15*r, d=8-r**2, i_offset=0)
        #pop_exc.append(pe)

    #pop_exc = sim.Population(pop_exc)
    for pi in index_inh:
        pi = all_cells[pi]
        #print(pi)
        #pi = all_cells[i]
        r = random.uniform(0.0, 1.0)
        pi.set_parameters(a=0.02+0.08*r, b=0.25-0.05*r, c=-65, d= 2, i_offset=0)
        #pop_inh.append(pi)
    #pop_inh = sim.Population(pop_inh)

    '''
    for pe in pop_exc:
        r = random.uniform(0.0, 1.0)
        pe.set_parameters(a=0.02, b=0.2, c=-65+15*r, d=8-r**2, i_offset=0)

    for pi in pop_inh:
        r = random.uniform(0.0, 1.0)
        pi.set_parameters(a=0.02+0.08*r, b=0.25-0.05*r, c=-65, d= 2, i_offset=0)
    '''
    NEXC = len(num_exc)
    NINH = len(num_inh)

    exc_syn = sim.StaticSynapse(weight = wg, delay=delay_distr)
    assert np.any(internal_conn_ee.conn_list[:,0]) < ee_srcs.size
    prj_exc_exc = sim.Projection(all_cells, all_cells, internal_conn_ee, exc_syn, receptor_type='excitatory')
    prj_exc_inh = sim.Projection(all_cells, all_cells, internal_conn_ei, exc_syn, receptor_type='excitatory')
    inh_syn = sim.StaticSynapse(weight = wg, delay=delay_distr)
    delay_distr = RandomDistribution('normal', [1, 100e-3], rng=rng)
    prj_inh_inh = sim.Projection(all_cells, all_cells, internal_conn_ii, inh_syn, receptor_type='inhibitory')
    prj_inh_exc = sim.Projection(all_cells, all_cells, internal_conn_ie, inh_syn, receptor_type='inhibitory')
    inh_distr = RandomDistribution('normal', [1, 2.1e-3], rng=rng)


    def prj_change(prj,wg):
        prj.setWeights(wg)
    prj_change(prj_exc_exc,wg)
    prj_change(prj_exc_inh,wg)
    prj_change(prj_inh_exc,wg)
    prj_change(prj_inh_inh,wg)

    def prj_check(prj):
        for w in prj.weightHistogram():
            for i in w:
                print(i)
    prj_check(prj_exc_exc)
    prj_check(prj_exc_inh)
    prj_check(prj_inh_exc)
    prj_check(prj_inh_inh)

    #print(rheobase['value'])
    #print(float(rheobase['value']),1.25/1000.0)
    '''Old values that worked
    noise = sim.NoisyCurrentSource(mean=0.85/1000.0, stdev=5.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
    pop_exc.inject(noise)
    #1000.0 pA


    noise = sim.NoisyCurrentSource(mean=1.740/1000.0, stdev=5.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
    pop_inh.inject(noise)
    #1750.0 pA
    '''

    noise = sim.NoisyCurrentSource(mean=0.74/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
    pop_exc.inject(noise)
    #1000.0 pA


    noise = sim.NoisyCurrentSource(mean=1.440/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0)
    pop_inh.inject(noise)

    ##
    # Set up and run a simulation. Note there is no current injection into the neurons:
    # all cells in the network are in a quiescent state, so it is no surprise that there are no spikes.
    ##

    sim = pyNN.neuron
    arange = np.arange
    import re
    all_cells.record(['v','spikes'])  # , 'u'])
    all_cells.initialize(v=-65.0, u=-14.0)
    # === Run the simulation =====================================================
    tstop = 2000.0
    sim.run(tstop)
    data = None
    data = all_cells.get_data().segments[0]

    #print(len(data.analogsignals[0].times))
    with open('pickles/qi'+str(wg)+'.p', 'wb') as f:
        pickle.dump(data,f)
    # make data none or else it will grow in a loop
    all_cells = None
    data = None
    noise = None
Example #21
def create_summary(self, text, corenumber=3, pathsimilarity=0.8,
                   graphtraversedsummary=False, shortestpath=True):
    if graphtraversedsummary == True:
        definitiongraph = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(text)
        # This has to be replaced by a hypergraph transversal, but NetworkX does not have hypergraphs yet.
        # Hence the transversal is approximated with a k-core, which is the graph counterpart of a
        # hypergraph transversal. Other measures create a summary too: Vertex Cover is NP-hard while Edge Cover is polynomial time.
        richclubcoeff = nx.rich_club_coefficient(definitiongraph.to_undirected())
        print("Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:", richclubcoeff)
        kcore = nx.k_core(definitiongraph, corenumber)
        print("Text summarized by k-core (subgraph having vertices of degree at least k) on the Recursive Gloss Overlap graph:")
        print("==========================")
        print("Dense subgraph edges:")
        print("==========================")
        print(kcore.edges())
        print("==========================")
        if shortestpath == False:
            for e in kcore.edges():
                for s1 in wn.synsets(e[0]):
                    for s2 in wn.synsets(e[1]):
                        if s1.path_similarity(s2) > pathsimilarity:
                            lowestcommonhypernyms = s1.lowest_common_hypernyms(s2)
                            for l in lowestcommonhypernyms:
                                for ln in l.lemma_names():
                                    print(e[0], " and ", e[1], " are ", ln, ".", end=" ")
        else:
            # The following is a slightly modified version of the shortest_path_distance() function
            # in NLTK WordNet - it traverses the synset path between 2 synsets instead of the distance.
            summary = {}
            intermediates = []
            for e in kcore.edges():
                for s1 in wn.synsets(e[0]):
                    for s2 in wn.synsets(e[1]):
                        s1dict = s1._shortest_hypernym_paths(False)
                        s2dict = s2._shortest_hypernym_paths(False)
                        s2dictkeys = s2dict.keys()
                        for s, d in s1dict.items():
                            if s in s2dictkeys:
                                slemmanames = s.lemma_names()
                                if slemmanames[0] not in intermediates:
                                    intermediates.append(slemmanames[0])
                if len(intermediates) > 3:
                    sentence1 = e[0] + " is a " + intermediates[0]
                    summary[sentence1] = self.relevance_to_text(sentence1, text)
                    for i in range(len(intermediates) - 2):
                        sentence2 = intermediates[i] + " is a " + intermediates[i + 1] + "."
                        if sentence2 not in summary:
                            summary[sentence2] = self.relevance_to_text(sentence2, text)
                    sentence3 = intermediates[len(intermediates) - 1] + " is a " + e[1]
                    summary[sentence3] = self.relevance_to_text(sentence3, text)
                    intermediates = []
            # sort the sentences by their relevance score
            sorted_summary = sorted(summary, key=summary.get, reverse=True)
            print("===================================================================")
            print("Sorted summary created from k-core dense subgraph of text RGO")
            print("===================================================================")
            for s in sorted_summary:
                print(s, end=" ")
        return (sorted_summary, len(sorted_summary))
    else:
        definitiongraph_merit = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(text)
        definitiongraph = definitiongraph_merit[0]
        richclubcoeff = nx.rich_club_coefficient(definitiongraph.to_undirected(), normalized=False)
        print("Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:", richclubcoeff)
        textsentences = text.split(".")
        lensummary = 0
        summary = []
        definitiongraphclasses = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlap_Classify(text)
        print("Text summarized based on the Recursive Gloss Overlap graph classes the text belongs to:")
        prominentclasses = int(len(definitiongraphclasses[0]) / 2)
        print("Total number of classes:", len(definitiongraphclasses[0]))
        print("Number of prominent classes:", prominentclasses)
        for c in definitiongraphclasses[0][:prominentclasses]:
            if len(summary) > len(textsentences) * 0.5:
                return (summary, lensummary)
            for s in textsentences:
                classsynsets = wn.synsets(c[0])
                for classsynset in classsynsets:
                    if self.relevance_to_text(classsynset.definition(), s) > 0.41:
                        if s not in summary:
                            summary.append(s)
                            lensummary += len(s)
                            print(s, end=" ")
        return (summary, lensummary)
Example #22
#"""
xGraphs = [nx.Graph() for i in xrange(len(Graphs))]
xFakeGraphs = [nx.Graph() for i in xrange(len(FakeGraphs))]
for i in xrange(len(Graphs)):
	gt.remove_self_loops(Graphs[i])
	for e in Graphs[i].edges():
		xGraphs[i].add_edge(*e)
for i in xrange(len(FakeGraphs)):
	gt.remove_parallel_edges(FakeGraphs[i])
	for e in FakeGraphs[i].edges():
		xFakeGraphs[i].add_edge(*e)
#print nx.rich_club_coefficient(xGraphs[0], normalized = False)
RealClubs = []
print len(xGraphs)
for Graph in xGraphs:
	Coefficients = nx.rich_club_coefficient(Graph, normalized = False)
	print len(Coefficients)
	Dummy = np.zeros((len(Coefficients),))
	for i in xrange(len(Dummy)):
		Dummy[i] = Coefficients[i]
	RealClubs.append(Dummy)
FakeClubs = []
for Graph in xFakeGraphs:
	Coefficients = nx.rich_club_coefficient(Graph, normalized = False)
	print len(Coefficients)
	Dummy = np.zeros((len(Coefficients),))
	for i in xrange(len(Dummy)):
		Dummy[i] = Coefficients[i]
	FakeClubs.append(Dummy)
Lines = []
f,ax = plt.subplots()
Example #23
GCnodes = max(nx.connected_components(H), key=len)
# giant component as a network
G = H.subgraph(GCnodes)

###### drawing the graph --- Kamada-Kawai layout
plt.figure(figsize=[9, 9])
pos = nx.kamada_kawai_layout(G, weight=None)  # positions for all nodes
nx.draw_networkx_nodes(G, pos, node_color='salmon', node_size=200)
nx.draw_networkx_edges(G, pos, edge_color='lightblue')
nx.draw_networkx_labels(G, pos, font_size=7, font_color='black')
plt.axis('off')
plt.title('Dolphin social network')
plt.show()

##### Rich club coefficient (original network)
RCdict = nx.rich_club_coefficient(G, normalized=False)

# extracting the degree and rich club coeff from the dictionary
K = [k for k, rc in RCdict.items()]
RCorig = [rc for k, rc in RCdict.items()]

##### Rich club coefficient (random network)
RCrand = np.zeros_like(RCorig)
nIter = 200  # number of random networks to be generated
print('Generating random networks ')
for iIter in range(nIter):
    print('.', end='')
    if (iIter + 1) % 20 == 0:
        print()

    # first generating a random network
Example #24
def rich_club(G):
    rc = nx.rich_club_coefficient(G,normalized=False)
    return rc
Example #25
def rich_club_norm(G):
    rc = nx.rich_club_coefficient(G,normalized=True)
    return rc
Example #26
print(k)
#print(degree[1])
deg = {}
deg2 = {}
sum = 0.0
for i in G_init.nodes():
    deg[i] = G_init.degree(i)
    deg2[i] = deg[i] * deg[i]
    sum += deg2[i]
degree_max = sorted(deg.items(), key=lambda d: d[1], reverse=True)
print('max degree')
print(degree_max[0])
print('average_clustering')
print(nx.average_clustering(G_init))
print('average_shortest_path_length')
D = G_init.subgraph(max(nx.connected_components(G_init), key=len))  # largest connected component
print(nx.average_shortest_path_length(D))
#print(nx.average_shortest_path_length(G_init))
print('assortativity_coefficient')
print(round(nx.degree_assortativity_coefficient(G_init), 3))
print("H")
H = round(sum / (float(k * k) * float(G_init.number_of_nodes())), 3)
print(H)
print("spreading lamida")
lamida = float(k) / (float(sum) / float(G_init.number_of_nodes()) - k)
print(round(lamida, 3))
rc = nx.rich_club_coefficient(G_init, normalized=False)  # rich club
#print("rich_club")
print(rc)
largest_cc = len(max(nx.connected_components(G), key=len))
print(largest_cc)
Example #27
    def makeMeasures(self, network, exclude):
        """Make the network measures"""
        # TODO: make each measure conditional: if it is not in exclude[],
        # time the measure and store it as a tuple in timings

        g = network.g
        gu = network.gu
        timings = []

        T = t.time()
        self.N = network.g.number_of_nodes()
        self.E = network.g.number_of_edges()
        self.E_ = network.gu.number_of_edges()
        self.edges = g.edges(data=True)
        self.nodes = g.nodes(data=True)
        timings.append((t.time() - T, "edges and nodes"))

        T = t.time()
        self.degrees = dict(g.degree())
        self.nodes_ = sorted(g.nodes(), key=lambda x: self.degrees[x])
        self.degrees_ = [self.degrees[i] for i in self.nodes_]
        self.in_degrees = dict(g.in_degree())
        self.in_degrees_ = [self.in_degrees[i] for i in self.nodes_]
        self.out_degrees = dict(g.out_degree())
        self.out_degrees_ = [self.out_degrees[i] for i in self.nodes_]
        timings.append((t.time() - T, "in_out_total_degrees"))

        T = t.time()
        self.strengths = dict(g.degree(weight="weight"))
        self.nodes__ = sorted(g.nodes(), key=lambda x: self.strengths[x])
        self.strengths_ = [self.strengths[i] for i in self.nodes_]
        self.in_strengths = dict(g.in_degree(weight="weight"))
        self.in_strengths_ = [self.in_strengths[i] for i in self.nodes_]
        self.out_strengths = dict(g.out_degree(weight="weight"))
        self.out_strengths_ = [self.out_strengths[i] for i in self.nodes_]
        timings.append((t.time() - T, "in_out_total_strengths"))

        # symmetry measures
        self.asymmetries = asymmetries = []
        self.disequilibrium = disequilibriums = []
        self.asymmetries_edge_mean = asymmetries_edge_mean = []
        self.asymmetries_edge_std = asymmetries_edge_std = []
        self.disequilibrium_edge_mean = disequilibrium_edge_mean = []
        self.disequilibrium_edge_std = disequilibrium_edge_std = []
        for node in self.nodes_:
            if not self.degrees[node]:
                asymmetries.append(0.)
                disequilibriums.append(0.)
                asymmetries_edge_mean.append(0.)
                asymmetries_edge_std.append(0.)
                disequilibrium_edge_mean.append(0.)
                disequilibrium_edge_std.append(0.)
            else:
                asymmetries.append(
                    (self.in_degrees[node] - self.out_degrees[node]) /
                    self.degrees[node])
                disequilibriums.append(
                    (self.in_strengths[node] - self.out_strengths[node]) /
                    self.strengths[node])
                edge_asymmetries = ea = []
                edge_disequilibriums = ed = []
                predecessors = g.predecessors(node)
                successors = g.successors(node)
                for pred in predecessors:
                    if pred in successors:
                        ea.append(0.)
                        ed.append(
                            (g[pred][node]['weight'] - g[node][pred]['weight'])
                            / self.strengths[node])
                    else:
                        ea.append(1.)
                        ed.append(g[pred][node]['weight'] /
                                  self.strengths[node])
                for suc in successors:
                    if suc in predecessors:
                        pass
                    else:
                        ea.append(-1.)
                        ed.append(-g[node][suc]['weight'] /
                                  self.strengths[node])
                asymmetries_edge_mean.append(n.mean(ea))
                asymmetries_edge_std.append(n.std(ea))
                disequilibrium_edge_mean.append(n.mean(ed))
                disequilibrium_edge_std.append(n.std(ed))

        if "weighted_directed_betweenness" not in exclude:
            T = t.time()
            self.weighted_directed_betweenness = x.betweenness_centrality(
                g, weight="weight")
            self.weighted_directed_betweenness_ = [
                self.weighted_directed_betweenness[i] for i in self.nodes_
            ]
            timings.append((t.time() - T, "weighted_directed_betweenness"))
        if "unweighted_directed_betweenness" not in exclude:
            T = t.time()
            self.unweighted_directed_betweenness = x.betweenness_centrality(g)
            timings.append((t.time() - T, "unweighted_directed_betweenness"))
        if "weighted_undirected_betweenness" not in exclude:
            T = t.time()
            self.weighted_undirected_betweenness = x.betweenness_centrality(
                gu, weight="weight")
            timings.append((t.time() - T, "weighted_undirected_betweenness"))
        if "unweighted_undirected_betweenness" not in exclude:
            T = t.time()
            self.unweighted_undirected_betweenness = x.betweenness_centrality(gu)
            timings.append((t.time() - T, "unweighted_undirected_betweenness"))
        if "wiener" not in exclude:
            T = t.time()
            self.wiener = x.wiener_index(g, weight="weight")
            timings.append((t.time() - T, "weiner"))
        if "closeness" not in exclude:
            T = t.time()
            self.closeness = x.vitality.closeness_vitality(g, weight="weight")
            timings.append((t.time() - T, "closeness"))
        if "transitivity" not in exclude:
            T = t.time()
            self.transitivity = x.transitivity(g)
            timings.append((t.time() - T, "transitivity"))
        if "rich_club" not in exclude:
            T = t.time()
            self.rich_club = x.rich_club_coefficient(gu)
            timings.append((t.time() - T, "rich_club"))

        if "weighted_clustering" not in exclude:
            T = t.time()
            self.weighted_clusterings = x.clustering(network.gu,
                                                     weight="weight")
            self.weighted_clusterings_ = [
                self.weighted_clusterings[i] for i in self.nodes_
            ]
            timings.append((t.time() - T, "weighted_clustering"))
        if "clustering" not in exclude:
            T = t.time()
            self.clusterings = x.clustering(network.gu)
            self.clusterings_ = [self.clusterings[i] for i in self.nodes_]
            timings.append((t.time() - T, "clustering"))
        if "triangles" not in exclude:
            T = t.time()
            self.triangles = x.triangles(gu)
            timings.append((t.time() - T, "clustering"))
        if "n_weakly_connected_components" not in exclude:
            T = t.time()
            self.n_weakly_connected_components = x.number_weakly_connected_components(
                g)
            timings.append((t.time() - T, "n_weakly_connected_components"))
        if "n_strongly_connected_components" not in exclude:
            T = t.time()
            self.n_strongly_connected_components = x.number_strongly_connected_components(
                g)
            timings.append((t.time() - T, "n_strongly_connected_components"))
        T = t.time()
        foo = [i for i in x.connected_component_subgraphs(gu)]
        bar = sorted(foo, key=lambda x: x.number_of_nodes(), reverse=True)
        self.component = c = bar[0]
        timings.append((t.time() - T, "component"))
        T = t.time()
        self.diameter = x.diameter(c)
        self.radius = x.radius(c)
        self.center = x.center(c)
        self.periphery = x.periphery(c)
        timings.append((t.time() - T, "radius_diameter_center_periphery"))
        self.timings = timings

        T = t.time()
        self.n_connected_components = x.number_connected_components(gu)
        nodes = []
        nodes_components = [
            foo.nodes() for foo in x.connected_component_subgraphs(gu)
        ][:1]
        for nodes_ in nodes_components:
            nodes += nodes_
        self.periphery_ = nodes
        self.timings = timings
Example #28
from typing import Dict

def _compute_rich_club_coefficient(G: nx.Graph) -> Dict[int, float]:
    """
    Source: https://networkx.github.io/documentation/stable/_modules/networkx/algorithms/richclub.html#rich_club_coefficient
    """
    return nx.rich_club_coefficient(G)
Example #29
def test_rich_club_exception2():
    with pytest.raises(nx.NetworkXNotImplemented):
        G = nx.MultiGraph()
        nx.rich_club_coefficient(G)
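rich_club_coefficient is not implemented for multigraphs (or directed graphs), which is what the test above asserts. A minimal sketch of the usual workaround, flattening to a simple undirected graph first (Example #5 above does the same for the directed case):

import networkx as nx

# a MultiGraph must be flattened before computing the coefficient
M = nx.MultiGraph([(0, 1), (0, 1), (1, 2), (0, 2), (2, 3)])
rc_m = nx.rich_club_coefficient(nx.Graph(M), normalized=False)

# a directed graph is converted to an undirected one the same way
D = nx.gnp_random_graph(30, 0.2, seed=1, directed=True)
rc_d = nx.rich_club_coefficient(D.to_undirected(), normalized=False)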
Example #30
    infomap_clust=gg.community_infomap()
    modularity_infomap=infomap_clust.modularity
    membership=infomap_clust.membership
    numpy.savetxt('/corral-repl/utexas/poldracklab/data/selftracking/analyses/rsfmri_analyses/infomap_assignments/infomap_sess%03d_%.04f.txt'%(sess,edge_density),membership)
    
    sizethresh=2
    labels=numpy.array(infomap_clust.membership)
    for x in numpy.unique(labels):
        if numpy.sum(labels==x)<sizethresh:
            labels[labels==x]=0

    pi=participation_index.participation_index(adj,labels)
    #mean_pi=numpy.mean(pi)
    
    try:
        rcc=networkx.rich_club_coefficient(G,normalized=True)
        rcc_cutoff=int(numpy.ceil(numpy.mean(degree) + numpy.std(degree)))
        rcc_at_cutoff=rcc[rcc_cutoff]
    except:
        rcc_at_cutoff=0.0

    # get small world coefficient
    #from the clustering coefficient (CC) and the average path length (PL) =
    # CC(actual network)/CC(random graph) divided by PL(actual network)/PL(random graph)
    # use just the largest connected component

    gcsize=G.number_of_nodes()
    apl=networkx.average_shortest_path_length(G)
    Gclust=networkx.average_clustering(G)

Example #31
def data_dump(plot_inhib, plot_excit, plot_EE, plot_IE, plot_II, plot_EI,
              filtered):

    import pandas as pd
    import networkx as nx
    import pickle

    with open('graph_inhib.p', 'wb') as f:
        pickle.dump(plot_inhib, f, protocol=2)

    import pickle
    with open('graph_excit.p', 'wb') as f:
        pickle.dump(plot_excit, f, protocol=2)

    #with open('cell_names.p','wb') as f:
    #    pickle.dump(rcls,f)
    import pandas as pd
    pd.DataFrame(plot_EE).to_csv('ee.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_IE).to_csv('ie.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_II).to_csv('ii.csv', index=False)

    import pandas as pd
    pd.DataFrame(plot_EI).to_csv('ei.csv', index=False)

    from scipy.sparse import coo_matrix
    m = np.matrix(filtered[1:])

    bool_matrix = np.add(plot_excit, plot_inhib)
    with open('bool_matrix.p', 'wb') as f:
        pickle.dump(bool_matrix, f, protocol=2)

    if not isinstance(m, coo_matrix):
        m = coo_matrix(m)

    Gexc_ud = nx.Graph(plot_excit)
    avg_clustering = nx.average_clustering(Gexc_ud)

    rc = nx.rich_club_coefficient(Gexc_ud, normalized=False)
    print('Rich-club coefficient at degree 0:', rc[0])
    gexc = nx.DiGraph(plot_excit)

    gexcc = nx.betweenness_centrality(gexc)
    top_exc = sorted(([(v, k) for k, v in dict(gexcc).items()]), reverse=True)

    in_degree = dict(gexc.in_degree())
    top_in = sorted(([(v, k) for k, v in in_degree.items()]))
    in_hub = top_in[-1][1]
    out_degree = dict(gexc.out_degree())
    top_out = sorted(([(v, k) for k, v in out_degree.items()]))
    out_hub = top_out[-1][1]
    mean_out = np.mean(list(out_degree.values()))
    mean_in = np.mean(list(in_degree.values()))

    mean_conns = int((mean_in + mean_out) / 2)  # average of mean in- and out-degree

    k = 2  # number of neighbouring nodes to wire.
    p = 0.25  # probability of instead wiring to a random long range destination.
    ne = len(plot_excit)  # size of small world network
    small_world_ring_excit = nx.watts_strogatz_graph(ne, mean_conns, 0.25)

    k = 2  # number of neighbouring nodes to wire.
    p = 0.25  # probability of instead wiring to a random long range destination.
    ni = len(plot_inhib)  # size of small world network
    small_world_ring_inhib = nx.watts_strogatz_graph(ni, mean_conns, 0.25)
Example #32
def test_richclub_exception():
    G = nx.DiGraph()
    nx.rich_club_coefficient(G)
Example #33
import networkx as nx
import matplotlib.pyplot as plt
import pylab

# read_edgelist returns an undirected nx.Graph by default, which is
# what rich_club_coefficient requires
G = nx.read_edgelist('wiki.txt')

#print(nx.info(G))

#print(nx.number_of_nodes(G))
#print(nx.number_of_edges(G))

#print(nx.is_directed(G))

#Rich club
rc = nx.rich_club_coefficient(G, normalized=False)
plt.scatter(list(rc.keys()), list(rc.values()), marker='x', c='g', s=0.5)
plt.title('Rich-Club coefficient vs node degree')
plt.xlabel('k')
plt.ylabel('Rich-club coefficient of k')
pylab.savefig('rc_coefficient.png')

#Compute degree assortativity of graph.
dac = nx.degree_assortativity_coefficient(G)
print(dac)

#Nearest neighbour degree
#knn = nx.k_nearest_neighbors(G)
#plt.scatter(knn.keys(),knn.values(),marker='x', c='r',s = 0.5)
#plt.title('Nearest neighbours average degree vs node degree')
#plt.xlabel('k')
Example #34
    e_weight = G[n1][n2]['weight']
    weight_dict[n1] += e_weight
    weight_dict[n2] += e_weight

############################################
print(nx.attribute_assortativity_coefficient(G, 'gender'))

node_clustering = nx.clustering(G)
node_degree = nx.degree(G)

btw_list = nx.betweenness_centrality(G)
closeness_list = nx.closeness_centrality(G)
core_list = nx.core_number(G)  # not a good measure
esize = effective_size(G)
core_list = nx.core_number(G)
rc = nx.rich_club_coefficient(G, normalized=False)
rc[23] = 1.0
print(rc)
print(core_list)

male_cc = []
male_deg = []
male_btw = []
male_closeness = []
male_esize = []
male_core = []
male_rc = []
male_strength = []

female_cc = []
female_deg = []
Example #35
def eval_rich_club(graph):
    # extracting feature matrix
    return list(
        nx.rich_club_coefficient(remove_selfloops(graph),
                                 normalized=False).values())
Example #36
def analyzeGraph(filename, metrics, myformat='adjacency'):
    # format: 'adjacency' is the 0/1 matrix with values separated by "," and rows terminated by ";" (DEFAULT)
    # the 'matlab' format is the one MATLAB itself uses for matrices

    myfile = open(filename)
    # initialize the data structure
    matrix = []
    for line in myfile:
        print(line)
        if myformat == 'matlab':
            line = line.replace('[', '')
            line = line.replace(']', '')
            line = line.replace(';', '')
            print(line)
        matrix.append([int(i) for i in line.split(',')])
    myfile.close()
    topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
    g = topologicalmap.graph

    gx = topologicalmap.gx


    Cs = g.vs.select(RC_label = 'C')
    Rs = g.vs.select(RC_label = 'R')
    indexC = [i.index for i in Cs]
    indexR = [i.index for i in Rs]
    data = dict()
    #######################################           Network X

    # degree_assortativity
    degree_assortativity = nx.degree_assortativity_coefficient(gx)
    data['degree_assortativity'] = degree_assortativity
    # eccentricity
    eccentricity = (len(indexR)+len(indexC))*[0]
    for k, v in nx.eccentricity(gx).items():
        eccentricity[k] = v
    data['eccentricity'] = eccentricity
    # mu_eccentricity
    data['mu_eccentricity'] = avg(eccentricity)
    # katz_centrality
    katz_centrality = (len(indexR)+len(indexC))*[0]
    for k, v in nx.katz_centrality(gx).items():
        katz_centrality[k] = v
    data['katz_centrality'] = katz_centrality
    # mu_katz_centrality
    data['mu_katz_centrality'] = avg(katz_centrality)
    # rich_club_coefficient
    rich_club_coefficient = nx.rich_club_coefficient(gx)
    data['rich_club_coefficient'] = rich_club_coefficient
    # mu_rich_club_coefficient
    data['mu_rich_club_coefficient'] = avg(rich_club_coefficient)


    #######################################           Igraph
    # number of nodes
    data['nodes'] = len(g.vs())
    # number of Rs
    data['R'] = len(indexR)
    # number of Cs
    data['C'] = len(indexC)
    # average path length
    data['path_len'] = g.average_path_length()
    # diameter
    data['diameter'] = g.diameter()
    # average degree (density)
    data['density'] = g.density()
    # articulation points: how many there are
    data['articulation_points'] = len(g.articulation_points())
    # betweenness
    betweenness = g.betweenness()
    data['betweenness'] = betweenness
    # mean betweenness
    data['mu_betweenness'] = avg(betweenness)
    # scaled betweenness
    scaled_b = [float(i) / (float(len(betweenness) - 1)) / (float(len(betweenness)) - 2) for i in betweenness]
    data['scaled_betweenness'] = scaled_b
    # mean scaled betweenness
    data['mu_scaled_betweenness'] = avg(scaled_b)
    # scaled betweenness, R nodes only
    data['Rbetweenness'] = selectLabelArray(scaled_b, indexR)

    # average eccentricity, R nodes only
    print(eccentricity)
    data['Reccentricity'] = selectLabelArray(eccentricity, indexR)
    data['mu_Reccentricity'] = avg(data['Reccentricity'])
    # average eccentricity, C nodes only
    data['Ceccentricity'] = selectLabelArray(eccentricity, indexC)
    data['mu_Ceccentricity'] = avg(data['Ceccentricity'])
    # average katz_centrality, R nodes only
    data['Rkatz_centrality'] = selectLabelArray(katz_centrality, indexR)
    data['mu_Rkatz_centrality'] = avg(data['Rkatz_centrality'])
    # average katz_centrality, C nodes only
    data['Ckatz_centrality'] = selectLabelArray(katz_centrality, indexC)
    data['mu_Ckatz_centrality'] = avg(data['Ckatz_centrality'])

    # average scaled betweenness, R nodes only
    print(data['Rbetweenness'])
    data['mu_Rbetweenness'] = avg(data['Rbetweenness'])
    # scaled betweenness, C nodes only
    data['Cbetweenness'] = selectLabelArray(scaled_b, indexC)
    # average scaled betweenness, C nodes only
    data['mu_Cbetweenness'] = avg(data['Cbetweenness'])
    # closeness
    closeness = g.closeness()
    data['closeness'] = closeness
    # average closeness
    data['mu_closeness'] = avg(closeness)
    # closeness, R nodes only
    data['Rcloseness'] = selectLabelArray(closeness, indexR)
    # average closeness, R nodes only
    data['mu_Rcloseness'] = avg(data['Rcloseness'])
    # closeness, C nodes only
    data['Ccloseness'] = selectLabelArray(closeness, indexC)
    # average closeness, C nodes only
    data['mu_Ccloseness'] = avg(data['Ccloseness'])
    # eigenvector centrality
    eigenvec = g.eigenvector_centrality()
    data['eig'] = eigenvec
    # mean eigenvector centrality
    data['mu_eig'] = avg(eigenvec)
    # eigenvector centrality, R nodes only
    data['Reig'] = selectLabelArray(eigenvec, indexR)
    # mean eigenvector centrality, R nodes only
    data['mu_Reig'] = avg(data['Reig'])
    # eigenvector centrality, C nodes only
    data['Ceig'] = selectLabelArray(eigenvec, indexC)
    # mean eigenvector centrality, C nodes only
    data['mu_Ceig'] = avg(data['Ceig'])
    # coreness
    coreness = g.coreness()
    data['coreness'] = coreness
    # mean coreness
    data['mu_coreness'] = avg(coreness)
    # coreness, R nodes only
    data['Rcoreness'] = selectLabelArray(coreness, indexR)
    # mean coreness, R nodes only
    data['mu_Rcoreness'] = avg(data['Rcoreness'])
    # coreness, C nodes only
    data['Ccoreness'] = selectLabelArray(coreness, indexC)
    # mean coreness, C nodes only
    data['mu_Ccoreness'] = avg(data['Ccoreness'])


    #print ".".join(filename.split(".")[:-1])+  ".png"
    #plot(graph,".".join(filename.split(".")[:-1])+".png")
    order = dict()
    for i in range(len(metrics)) :
        order[str(i)] = metrics[i]
    stringa = str()
    for j in range(len(metrics)):
        i = order[str(j)]
        stringa+= str(i) + ":\n"
        stringa+= str(data[i]) + "\n"
    text_file = open(".".join(filename.split(".")[:-1])+"_aggregate_data.log", "w")
    text_file.write(str(stringa))
    text_file.close()
    return data
Example #37
def richClubCoefficientsFunction():
    return nx.rich_club_coefficient(Graph, normalized=True)
Example #38
def get_graph_metrics(connectivity_vector):
    
    # reshape into matrix
    connectivity_matrix = np.reshape(connectivity_vector, (90, 90))
    
    # convert to networkx graph
    connectivity_graph = nwx.from_numpy_matrix(connectivity_matrix)
    
    # convert to a distance graph as some metrics need this instead
    distance_matrix = connectivity_matrix.copy()  # copy so the connectivity matrix is not mutated in place
    distance_matrix[distance_matrix == 0] = np.finfo(np.float32).eps
    distance_matrix = 1.0 / distance_matrix
    distance_graph = nwx.from_numpy_matrix(distance_matrix)
    
    # initialise vector of metrics
    metrics = np.zeros((21,))
    # fill the vector of metrics
    # 1 and 2: degree distribution
    degrees = np.sum(connectivity_matrix, axis = 1)
    metrics[0] = np.mean(degrees)
    metrics[1] = np.std(degrees)
    
    # 3 and 4: weight distribution
    weights = np.tril(connectivity_matrix, k = -1)
    metrics[2] = np.mean(weights)
    metrics[3] = np.std(weights)

    # 5: average shortest path length
    # transform weights to distances so this makes sense    
    metrics[4] = nwx.average_shortest_path_length(distance_graph, weight='weight')

    # 6: assortativity
    metrics[5] = nwx.degree_assortativity_coefficient(connectivity_graph, weight=None)
    
    # 7: clustering coefficient
    metrics[6] = nwx.average_clustering(connectivity_graph, weight='weight')
    
    # 8: transitivity
    metrics[7] = nwx.transitivity(connectivity_graph)
    
    # 9 & 10: local and global efficiency
    metrics[8] = np.mean(bct.efficiency_wei(connectivity_matrix, local=True))
    metrics[9] = bct.efficiency_wei(connectivity_matrix, local=False)
    
    # 11: Clustering coefficient
    metrics[10] = np.mean(list(nwx.clustering(connectivity_graph, weight='weight').values()))

    # 12 & 13: Betweenness centrality
    metrics[11] = np.mean(list(nwx.betweenness_centrality(distance_graph, weight='weight').values()))
    metrics[12] = np.mean(list(nwx.current_flow_betweenness_centrality(distance_graph, weight='weight').values()))

    # 14: Eigenvector centrality
    metrics[13] = np.mean(list(nwx.eigenvector_centrality(distance_graph, weight='weight').values()))

    # 15: Closeness centrality
    metrics[14] = np.mean(list(nwx.closeness_centrality(distance_graph, distance='weight').values()))

    # 16: PageRank
    metrics[15] = np.mean(list(nwx.pagerank(connectivity_graph, weight='weight').values()))

    # 17: Rich club coefficient
    metrics[16] = np.mean(list(nwx.rich_club_coefficient(connectivity_graph).values()))
    
    # 18: Density    
    metrics[17] = bct.density_und(connectivity_matrix)[0]
    
    # 19, 20, 21: Eccentricity, radius, diameter
    spl_all = dict(nwx.shortest_path_length(distance_graph, weight='weight'))  # materialize the iterator
    eccs = np.zeros(90,)
    for i in range(90):
        eccs[i] = np.max(list(spl_all[i].values()))
        
    metrics[18] = np.mean(eccs)
    metrics[19] = np.min(eccs)
    metrics[20] = np.max(eccs)  
    
    return metrics
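A usage sketch under two assumptions: the bct module is bctpy (which provides efficiency_wei and density_und as used above), and the installed NetworkX still exposes from_numpy_matrix (removed in NetworkX 3.0):

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((90, 90))
A = (A + A.T) / 2.0        # symmetric weights
np.fill_diagonal(A, 0.0)   # no self-connections

metrics = get_graph_metrics(A.flatten())
print(metrics[0], metrics[1])  # mean and std of the degree distribution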
Example #39
def test_rich_club_exception2():
    G = nx.MultiGraph()
    nx.rich_club_coefficient(G)
Example #40
#     len_cl = [len(e)for e in cl]
#     dict_cl = {e:len_cl.count(e)/float(len(len_cl))for e in set(len_cl)}
#     x = dict_cl.keys()
#     y = dict_cl.values()
#     plt.scatter(x,y,s=size[j],c=colors[j],label=labels[j],marker=markers[j],alpha=alphas[j])
#     plt.plot(x,y,c=colors[j])
#     j += 1
# plt.legend(loc='upper right')
# plt.xlabel('cliques')
# plt.ylabel('frequency')
# plt.show()
#==============================================================================


# rich-club coefficient distribution
l0 = nx.rich_club_coefficient(G, normalized=False)
l1 = nx.rich_club_coefficient(G1, normalized=False)
l2 = nx.rich_club_coefficient(G2, normalized=False)
j = 0
for l in [l0, l1, l2]:
    x = list(l.keys())
    y = list(l.values())
    plt.scatter(x, y, s=size[j], c=colors[j], label=labels[j], marker=markers[j], alpha=alphas[j])
    plt.plot(x, y, c=colors[j])
    j += 1
plt.legend(loc='upper left')
plt.xlabel('degree')
plt.ylabel('rich_club_coefficient')
plt.show()

#==============================================================================
Example #41
def eval_rich_club(graph):
    """eval_rich_club"""
    return list(
        nx.rich_club_coefficient(remove_selfloops(graph),
                                 normalized=False).values())
Example #42
            else:  # If no edges are longer than limit just simply connect the node
                nodes_to_connect = numpy.argsort(dists)[0:k]
                edges_to_add = itertools.product(nodes_to_connect, [curr_node])
                elist = list(edges_to_add)
                g.add_edges(elist)

        fileObject.write("   Degree of bridge nodes:\n    " +
                         str(g.degree(bridge_nodes)) + "\n")

        A = g.get_edgelist()
        # here is the networkx graph
        G = nx.Graph(A)

        fileObject.write("\n\n")
        rc = nx.rich_club_coefficient(G, normalized=True, Q=1000)
        fileObject.write("x: \n" + str(list(rc.keys())))
        fileObject.write("y: \n" + str(list(rc.values())))
        plt.plot(list(rc.keys()), list(rc.values()))
        pdfName = "rich" + str(intervalCounter) + "loop" + str(
            loop) + "rand" + str(randrange(1, 100)) + ".pdf"
        plt.savefig(pdfName, format='pdf')
        plt.close()

        print(g.vcount())
        global_bridge_degree = bridgeTest(g, bridge_nodes)

        outfile = "test_bridge_degrees12.txt"
        with open(outfile, 'w', newline='') as csvfile:  # text mode for the csv module on Python 3
            writer = csv.writer(csvfile,
                                delimiter=' ',
Example #43
def richClubCoefficientsNoNormalisationFunction():
    return nx.rich_club_coefficient(Graph, normalized=False)
Example #44
def rich_club_coefficient(self):
    return nx.rich_club_coefficient(self.G)