Example #1
import logging

import bct
import numpy as np


def omega_sigma(matrix):
    """Returns the small-world coefficients (omega & sigma) of a graph.

    Omega ranges between -1 and 1. Values close to 0 mean the graph
    features small-world characteristics. Values close to -1 mean the
    network has a lattice structure and values close to 1 mean it is a
    random network.

    A network is commonly classified as small-world if sigma > 1.

    Parameters
    ----------
    matrix : numpy.ndarray
        A weighted undirected graph.

    Returns
    -------
    smallworld : tuple of float
        The small-world coefficients (omega & sigma).

    Notes
    -----
    The implementation is adapted from the algorithm by Telesford et al. [1]_.

    References
    ----------
    .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011).
           "The Ubiquity of Small-World Networks".
           Brain Connectivity. 1(5): 367-375. PMC 3604768. PMID 22432451.
           doi:10.1089/brain.2011.0038.
    """
    transitivity_rand_list = []
    transitivity_latt_list = []
    path_length_rand_list = []
    for i in range(10):
        logging.debug('Generating random and lattice matrices, '
                      'iteration #{}.'.format(i))
        random = bct.randmio_und(matrix, 10)[0]
        lattice = bct.latmio_und(matrix, 10)[1]

        transitivity_rand_list.append(bct.transitivity_wu(random))
        transitivity_latt_list.append(bct.transitivity_wu(lattice))
        path_length_rand_list.append(avg_cast(bct.distance_wei(random)[0]))

    transitivity = bct.transitivity_wu(matrix)
    path_length = avg_cast(bct.distance_wei(matrix)[0])
    transitivity_rand = np.mean(transitivity_rand_list)
    transitivity_latt = np.mean(transitivity_latt_list)
    path_length_rand = np.mean(path_length_rand_list)

    omega = (path_length_rand / path_length) - \
        (transitivity / transitivity_latt)
    sigma = (transitivity / transitivity_rand) / \
        (path_length / path_length_rand)

    return float(omega), float(sigma)
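avg_cast is a helper that this snippet does not show. A minimal usage sketch, assuming avg_cast simply averages the finite off-diagonal entries of the distance matrix returned by bct.distance_wei (the real helper may differ):

def avg_cast(distances):
    # Hypothetical stand-in for the undefined avg_cast helper: average the
    # finite, off-diagonal entries of a shortest-path distance matrix.
    off_diag = ~np.eye(distances.shape[0], dtype=bool)
    finite = np.isfinite(distances)
    return float(np.mean(distances[off_diag & finite]))

# Toy weighted undirected matrix: symmetric with a zero diagonal.
rng = np.random.default_rng(42)
w = rng.random((20, 20))
w = np.triu(w, 1)
w = w + w.T

omega, sigma = omega_sigma(w)
print(omega, sigma)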
Example #2
def get_true_network_metrics(A):
    # control_centrality, synchronizability, betweenness_centrality,
    # node_strength and generate_fake_graph are helpers defined elsewhere
    # in the source; each metric is normalized by its mean over 100
    # null ("fake") graphs.

    # control centrality
    c_c = control_centrality(A)

    cc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        cc_fake[i] = np.mean(control_centrality(generate_fake_graph(A)))

    m_cc_fake = np.mean(cc_fake)
    cc_norm = c_c / m_cc_fake

    # Get identity of node with lowest control centrality
    min_cc_true = np.where(c_c == np.amin(c_c))[0]

    # get synchronizability
    sync = synchronizability(A)

    # normalized sync
    sync_fake = np.zeros((100, 1))
    for i in range(0, 100):
        sync_fake[i] = synchronizability(generate_fake_graph(A))

    m_sync_fake = np.mean(sync_fake)
    sync_norm = sync / m_sync_fake

    # get betweenness centrality
    bc = betweenness_centrality(A)
    bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        bc_fake[i] = np.mean(betweenness_centrality(generate_fake_graph(A)))

    m_bc_fake = np.mean(bc_fake)
    bc_norm = bc / m_bc_fake

    # Get identity of node with max bc
    max_bc_true = np.where(bc == np.amax(bc))[0]

    # get eigenvector centrality
    ec = bct.eigenvector_centrality_und(A)
    ec_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ec_fake[i] = np.mean(
            bct.eigenvector_centrality_und(generate_fake_graph(A)))

    m_ec_fake = np.mean(ec_fake)
    ec_norm = ec / m_ec_fake

    # Get identity of node with max ec
    max_ec_true = np.where(ec == np.amax(ec))[0]

    # get edge betweenness centrality
    edge_bc, ignore = bct.edge_betweenness_wei(A)
    edge_bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        edge_bc_fake[i] = np.mean(
            bct.edge_betweenness_wei(generate_fake_graph(A))[0])
    m_edge_bc_fake = np.mean(edge_bc_fake)
    edge_bc_norm = edge_bc / m_edge_bc_fake

    # get clustering coeff
    clust = bct.clustering_coef_wu(A)
    clust_fake = np.zeros((100, 1))
    for i in range(0, 100):
        clust_fake[i] = np.mean(bct.clustering_coef_wu(generate_fake_graph(A)))

    m_clust_fake = np.mean(clust_fake)
    clust_norm = clust / m_clust_fake

    # Get identity of node with max clust
    max_clust_true = np.where(clust == np.amax(clust))[0]

    # get node strength
    ns = node_strength(A)
    ns_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ns_fake[i] = np.mean(node_strength(generate_fake_graph(A)))

    m_ns_fake = np.mean(ns_fake)
    ns_norm = ns / m_ns_fake

    # Get identity of node with max ns
    max_ns_true = np.where(ns == np.amax(ns))[0]

    # Get community structure and participation coefficient
    Ci, ignore = bct.modularity_und(A)
    par = bct.participation_coef(A, Ci)

    # Get true efficiency
    eff = bct.efficiency_wei(A, 0)
    eff_fake = np.zeros((100, 1))
    for i in range(0, 100):
        eff_fake[i] = bct.efficiency_wei(generate_fake_graph(A))

    m_eff_fake = np.mean(eff_fake)
    eff_norm = eff / m_eff_fake

    # Get true transitivity
    trans = bct.transitivity_wu(A)
    trans_fake = np.zeros((100, 1))
    for i in range(0, 100):
        trans_fake[i] = bct.transitivity_wu(generate_fake_graph(A))

    m_trans_fake = np.mean(trans_fake)
    trans_norm = trans / m_trans_fake

    # store output results in a dictionary
    #nodal
    results = {}
    results['control_centrality'] = c_c
    results['control_centrality_norm'] = cc_norm
    results['min_cc_node'] = min_cc_true

    # global measure
    results['sync'] = sync
    results['sync_norm'] = sync_norm

    # nodal
    results['bc'] = bc
    results['bc_norm'] = bc_norm
    results['max_bc_node'] = max_bc_true

    # nodal
    results['ec'] = ec
    results['ec_norm'] = ec_norm
    results['max_ec_node'] = max_ec_true

    # nodal
    results['clust'] = clust
    results['clust_norm'] = clust_norm
    results['max_clust_node'] = max_clust_true

    # nodal
    results['ns'] = ns
    results['ns_norm'] = ns_norm
    results['max_ns_node'] = max_ns_true

    # global
    results['eff'] = eff
    results['eff_norm'] = eff_norm

    # global
    results['trans'] = trans
    results['trans_norm'] = trans_norm

    # nodal
    results['par'] = par

    # edge
    results['edge_bc'] = edge_bc
    results['edge_bc_norm'] = edge_bc_norm

    return results
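generate_fake_graph and the other helpers named above are not shown in this example. A plausible sketch of the null-model helper, assuming it is a degree-preserving rewiring done with bctpy; the original may be implemented differently:

def generate_fake_graph(A, n_rewires=10):
    # Hypothetical null-model generator (not shown in the source):
    # randmio_und rewires the undirected matrix A while preserving the
    # degree distribution; the count of effective rewirings is discarded.
    rewired, _ = bct.randmio_und(A, n_rewires)
    return rewired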
Example #3
def test_transitivity_wu():
    x = load_sample(thres=.23)
    t = bct.transitivity_wu(x)
    assert np.allclose(t, 1.32927829)
Example #4
def test_transitivity_signed():
    x = load_signed_sample(thres=.85)
    t = bct.transitivity_wu(x)
    assert np.imag(t) == 0
Example #5
                            # characteristic path length
                            cp = bct.charpath(thresh)
                            cp_s.append(cp[0])

                            # modularity
                            md = bct.modularity_louvain_und(thresh)
                            md_s.append(md[1])

                            # network measures of interest here
                            # assortativity
                            at = bct.assortativity_wei(thresh)
                            at_s.append(at)

                            # transitivity
                            tr = bct.transitivity_wu(thresh)
                            tr_s.append(tr)

                        df.at[(subject, session, task, conds[i], mask),
                              "assortativity"] = np.trapz(at_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
                              "transitivity"] = np.trapz(tr_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
                              "efficiency"] = np.trapz(ge_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
                              "charpath"] = np.trapz(cp_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
                              "modularity"] = np.trapz(md_s, dx=0.01)

                        # df.to_csv(join(sink_dir, 'resting-state_graphtheory_shen+craddock.csv'), sep=',')
                        lab_notebook.at[(subject, session, task, conds[i],
Example #6
                corr_pln += [CorrelationAnalyzer(nits)]

            corr_cls_coef = [d.corrcoef for d in corr_cls]
            pr_cls_tmp = np.asarray([
                bct.centrality.pagerank_centrality(g, d=0.85)
                for g in corr_cls_coef
            ]).mean(axis=0)

            corr_pln_coef = [d.corrcoef for d in corr_pln]
            pr_pln_tmp = np.asarray([
                bct.centrality.pagerank_centrality(g, d=0.85)
                for g in corr_pln_coef
            ]).mean(axis=0)

            trans_cls_tmp = np.asarray(
                [bct.transitivity_wu(g) for g in corr_cls_coef]).mean(axis=0)
            trans_pln_tmp = np.asarray(
                [bct.transitivity_wu(g) for g in corr_pln_coef]).mean(axis=0)

            ge_cls_tmp = np.asarray(
                [bct.distance.charpath(g)[1] for g in corr_cls_coef]).mean(
                    axis=0)
            ge_pln_tmp = np.asarray(
                [bct.distance.charpath(g)[1] for g in corr_pln_coef]).mean(
                    axis=0)

            cp_cls_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in corr_cls_coef]).mean(
                    axis=0)
            cp_pln_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in corr_pln_coef]).mean(
                    axis=0)
Example #10
from collections import OrderedDict

import bct
import numpy as np


def graph_estimates(cm, th):
    # threshold_connected and compute_small_worldness are helpers defined
    # elsewhere in the source file.

    # dictionary for storing our results
    d = OrderedDict()

    # thresholding is done here to also handle matrices other than
    # MATLAB matrices; it removes negative weights
    cm = bct.threshold_absolute(cm, 0.0)

    cm = threshold_connected(cm, th)

    
    # binarizing the connectivity matrices is turned off,
    # since we work with weighted graphs
    #bin_cm = bct.binarize(cm)

    # invert the connectivity for computing shortest paths
    cm_inv = bct.invert(cm)

    #modularity_und is found in modularity.py
    modularity_und = bct.modularity_und(cm)

    #the community_affiliation vector that gets input to some of the functions
    community_affiliation = modularity_und[0]
    
    #distance_wei and charpath is found in distance.py
    distance_wei = bct.distance_wei(cm_inv)
    charpath = bct.charpath(distance_wei[0], False, False)

    #clustering_coef_wu is found in clustering.py
    clustering_coef_wu = bct.clustering_coef_wu(cm)
    avg_clustering_coef_wu = np.mean(clustering_coef_wu)


    #assortativity_wei is found in core.py
    d['assortativity_wei-r'] = bct.assortativity_wei(cm, flag=0)

    #just taking the average of clustering_coef_wu
    d['avg_clustering_coef_wu:C'] = avg_clustering_coef_wu

    d['charpath-lambda'] = charpath[0]
    #d['charpath-efficiency'] = charpath[1]   
    #d['charpath-ecc'] = charpath[2]           
    #d['charpath-radius'] = charpath[3]
    #d['charpath-diameter'] = charpath[4]

    d['clustering_coef_wu-C'] = clustering_coef_wu


    d['efficiency_wei-Eglob'] = bct.efficiency_wei(cm)
    #d['efficiency_wei-Eloc'] = bct.efficiency_wei(cm, True)

    #d['modularity_und-ci'] = modularity_und[0]
    d['modularity_und-Q'] = modularity_und[1]

    d['small_worldness:S'] = compute_small_worldness(cm,
                                                     avg_clustering_coef_wu,
                                                     charpath[0])

   
    # transitivity_wu can be found in clustering.py
    d['transitivity_wu-T'] = bct.transitivity_wu(cm)


    # EXAMPLES of local and binary measures. Uncomment to use.

    #VECTOR MEASURES
    #d['betweenness_wei-BC'] = bct.betweenness_wei(cm_inv)
    # d['module_degree_zscore-Z'] = bct.module_degree_zscore(cm, community_affiliation)
    #d['degrees_und-deg'] = bct.degrees_und(cm)
    #d['charpath-ecc'] = charpath[2]


    #BINARIES
    # d['clustering_coef_bu-C'] = bct.clustering_coef_bu(bin_cm)
    # d['efficiency_bin-Eglob'] = bct.efficiency_bin(bin_cm)
    # d['efficiency_bin-Eloc'] = bct.efficiency_bin(bin_cm, True)
    #  d['modularity_und_bin-ci'] = modularity_und_bin[0]
    #  d['modularity_und_bin-Q'] = modularity_und_bin[1]
    # d['transitivity_bu-T'] = bct.transitivity_bu(bin_cm)
    #  d['betweenness_bin-BC'] = bct.betweenness_bin(bin_cm)
    #  modularity_und_bin = bct.modularity_und(bin_cm)
    #d['participation_coef'] = bct.participation_coef(cm, community_affiliation)


    # charpath gives problems with ecc, radius and diameter
    # np.seterr(invalid='ignore')


    return d
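compute_small_worldness is not shown in this example. A minimal sketch, assuming the common definition S = (C / C_rand) / (L / L_rand) with a degree-preserving random reference; the source's helper may differ:

def compute_small_worldness(cm, avg_clustering_coef, char_path_length,
                            n_rand=10):
    # Hypothetical helper: compare clustering and characteristic path
    # length against the mean over n_rand random rewirings of cm.
    c_rand, l_rand = [], []
    for _ in range(n_rand):
        rand = bct.randmio_und(cm, 10)[0]
        c_rand.append(np.mean(bct.clustering_coef_wu(rand)))
        d_rand = bct.distance_wei(bct.invert(rand))[0]
        l_rand.append(bct.charpath(d_rand, False, False)[0])
    return (avg_clustering_coef / np.mean(c_rand)) / \
        (char_path_length / np.mean(l_rand))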
Example #11
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_pln.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_pln.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(np.asarray([bct.transitivity_wu(g)
                                    for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(np.asarray([bct.transitivity_wu(g)
                                    for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedShuffleSplit(y, test_size=0.1)
Example #13
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_post.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_post.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(np.asarray([bct.transitivity_wu(g)
                                    for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(np.asarray([bct.transitivity_wu(g)
                                    for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedShuffleSplit(y, test_size=0.1)
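The excerpt stops right after constructing the cross-validation splitter. A hedged sketch of how it might continue, assuming the pre-0.18 sklearn.cross_validation API implied by StratifiedShuffleSplit(y, ...) and an arbitrary choice of classifier:

    from sklearn.cross_validation import cross_val_score  # pre-0.18 sklearn API
    from sklearn.linear_model import LogisticRegression

    # Hypothetical continuation: evaluate a simple classifier on the
    # transitivity features; the original script may use a different model.
    clf = LogisticRegression()
    scores = cross_val_score(clf, X, y, cv=cv)
    print(band, scores.mean())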
Example #14
def calc_graph_vector(filename, thresholds):
    '''
    Calculates graph measures for a connectivity matrix loaded from a text
    file and saves the results under the same name with the added suffix
    '_GV' (in the same directory the file is located in).

    Input arguments:
        filename(str):     name of file containing connectivity matrix (txt extension)
        thresholds(list):  list containing thresholds of interest

    Kamil Bonna, 14.08.2018
    '''
    #--- check inputs
    import os
    if not os.path.exists(filename):
        raise Exception('{} does not exist'.format(filename))
    if type(thresholds) != list: 
        raise Exception('thresholds should be a list!')
        
    import numpy as np
    import bct

    #=== inner variables
    N_rep_louvain = 10   # number of Louvain algorithm repetitions
    N_measures = 10      # number of graph measures
    gamma = 1            # Louvain resolution parameter
    
    #--- load matrix 
    A_raw = np.loadtxt(filename)
    N = A_raw.shape[0]   # number of nodes
    M_sat = N*(N-1)/2    # max number of connections 

    #=== calculate output
    graph_measures = np.zeros([ len(thresholds), N_measures ])  # create empty output matrix
    for thr in range(len(thresholds)):
        #--- thresholding
        A = bct.threshold_proportional(A_raw, p=thresholds[thr], copy=True)
        A[np.nonzero(A < 0)] = 0                     # ensure only positive weights
        M_act = A[np.nonzero(A > 0)].shape[0] / 2    # actual number of nonzero connections
        #--- calculate measures
        #-- mean connection strength
        S = np.sum(A)/M_act
        #-- connection strength std
        Svar = np.std(A[np.nonzero(A)])
        #-- modularity
        [M, Q] = bct.modularity_louvain_und(A, gamma)
        for i in range(N_rep_louvain):
            [Mt, Qt] = bct.modularity_louvain_und(A, gamma)
            if Qt > Q:
                Q = Qt
                M = Mt
        #-- participation coefficient
        P = np.mean(bct.participation_coef_sign(A, M))
        #-- clustering 
        C = np.mean(bct.clustering_coef_wu(A))
        #-- transitivity 
        T = bct.transitivity_wu(A)
        #-- assortativity
        Asso = bct.assortativity_wei(A)
        #-- global & local efficiency 
        Eglo = bct.efficiency_wei(A)
        Eloc = np.mean(bct.efficiency_wei(A, local=True))
        #-- mean eigenvector centrality
        Eig = np.mean(bct.eigenvector_centrality_und(A))
        #--- write vector to matrix
        graph_measures[thr] = [ S, Svar, Q, P, C, T, Asso, Eglo, Eloc, Eig ]

    #=== save results to file
    np.savetxt(filename[:-4] + '_GV.txt', graph_measures)
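A quick usage sketch; the file name and threshold values below are illustrative, not from the original source:

import numpy as np

# Build a toy symmetric connectivity matrix with a zero diagonal
# and save it as a text file.
m = np.random.rand(30, 30)
m = (m + m.T) / 2
np.fill_diagonal(m, 0)
np.savetxt('conn_matrix.txt', m)

# Compute graph measures at three proportional thresholds; results are
# written to 'conn_matrix_GV.txt' next to the input file.
calc_graph_vector('conn_matrix.txt', thresholds=[0.1, 0.2, 0.3])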