Example No. 1
def test_partition_distance():
    q = load_sample_group_qball()
    d = load_sample_group_dsi()

    q = np.mean(q, axis=2)
    d = np.mean(d, axis=2)

    qi, _ = bct.modularity_und(q)
    di, _ = bct.modularity_und(d)

    vi, mi = bct.partition_distance(qi, di)

    print(vi, mi)
    assert np.allclose(vi, 0.1964, atol=0.01)
    assert np.allclose(mi, 0.6394, atol=0.01)
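For reference, a minimal sketch on toy partitions (not the q-ball/DSI data used by the test above; assumes only bctpy and NumPy): partition_distance returns the normalized variation of information, which is 0 for identical partitions, and the normalized mutual information.

import numpy as np
import bct

# two toy community-affiliation vectors over 8 nodes
ci_a = np.array([1, 1, 1, 2, 2, 2, 3, 3])
ci_b = np.array([1, 1, 2, 2, 2, 3, 3, 3])

vi, mi = bct.partition_distance(ci_a, ci_b)
print(vi, mi)  # vi is 0 only when the two partitions are identical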
Example No. 2
def test_modularity_finetune_und():
    x = load_sample(thres=.4)

    seed = 94885236
    _, q = bct.modularity_finetune_und(x, seed=seed)
    assert np.allclose(q, .25879794)

    fails = 0
    for i in range(100):
        _, q = bct.modularity_finetune_und(x)
        try:
            assert np.allclose(q, .25, atol=0.03)
        except AssertionError:
            if fails >= 5:
                raise
            else:
                fails += 1

    seed = 71040925
    ci, oq = bct.modularity_louvain_und(x, seed=seed)
    _, q = bct.modularity_finetune_und(x, ci=ci, seed=seed)
    print(q, oq)
    # assert np.allclose(q, .25892588)
    assert np.allclose(q, .25856714)
    assert q - oq >= -1e-6

    ci, oq = bct.modularity_und(x)
    for i in range(100):
        _, q = bct.modularity_finetune_und(x, ci=ci)
        assert np.allclose(q, .25, atol=0.002)
        assert q - oq >= -1e-6
Example No. 3
def test_modularity_finetune_und():
    x = load_sample(thres=.4)

    seed = 94885236
    _, q = bct.modularity_finetune_und(x, seed=seed)
    assert np.allclose(q, .25879794)

    fails = 0
    for i in range(100):
        _, q = bct.modularity_finetune_und(x)
        try:
            assert np.allclose(q, .25, atol=0.03)
        except AssertionError:
            if fails >= 5:
                raise
            else:
                fails += 1

    seed = 71040925
    ci, oq = bct.modularity_louvain_und(x, seed=seed)
    _, q = bct.modularity_finetune_und(x, ci=ci, seed=seed)
    print(q, oq)
    # assert np.allclose(q, .25892588)
    assert np.allclose(q, .25856714)
    assert q - oq >= -1e-6

    ci, oq = bct.modularity_und(x)
    for i in range(100):
        _, q = bct.modularity_finetune_und(x, ci=ci)
        assert np.allclose(q, .25, atol=0.002)
        assert q - oq >= -1e-6
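A minimal sketch of the pattern exercised by this test, on synthetic data rather than the sample matrix (assuming the same bctpy calls used above): run Louvain, refine the partition with modularity_finetune_und, and check that Q does not decrease.

import numpy as np
import bct

rng = np.random.default_rng(42)
w = rng.random((40, 40))
w = (w + w.T) / 2          # symmetrize
np.fill_diagonal(w, 0)     # no self-connections

ci, q0 = bct.modularity_louvain_und(w, seed=42)
_, q1 = bct.modularity_finetune_und(w, ci=ci, seed=42)
print(q0, q1)
assert q1 >= q0 - 1e-6     # fine-tuning should not lower modularity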
Example No. 4
def comod_test(a1, a2):
    ma, qa = bct.modularity_und(a1)
    mb, qb = bct.modularity_und(a2)

    n = len(ma)
    if len(mb) != n:
        raise bct.BCTParamError('Comodularity must be done on equally sized '
                                'matrices')

    f, F = (0,) * 2

    for e1 in range(n):
        for e2 in range(n):
            if e2 >= e1:
                continue

            # node pairs
            comod_a = ma[e1] == ma[e2]
            comod_b = mb[e1] == mb[e2]

            # node pairs sharing a module in at least one graph
            if comod_a or comod_b:
                F += 1
            # node pairs sharing a module in both graphs
            if comod_a and comod_b:
                f += 1

    m1 = np.max(ma)
    m2 = np.max(mb)
    eta = []
    gamma = []
    for i in range(m1):
        eta.append(np.size(np.where(ma == i + 1)))
    for i in range(m2):
        gamma.append(np.size(np.where(mb == i + 1)))

    scale, conscale = (0,) * 2
    for h in eta:
        for g in gamma:
            # print h,g
            conscale += (h * g) / (n * (h + g) - h * g)
            scale += (h * h * g * g) / (n ** 3 * (h + g) - n * h * g)

    print(m1, m2)
    # print(conscale)
    print(scale)
    return (f / F) / scale
Example No. 5
def comod_test(a1, a2):
    ma, qa = bct.modularity_und(a1)
    mb, qb = bct.modularity_und(a2)

    n = len(ma)
    if len(mb) != n:
        raise bct.BCTParamError('Comodularity must be done on equally sized '
                                'matrices')

    f, F = (0, ) * 2

    for e1 in range(n):
        for e2 in range(n):
            if e2 >= e1:
                continue

            # node pairs
            comod_a = ma[e1] == ma[e2]
            comod_b = mb[e1] == mb[e2]

            # node pairs sharing a module in at least one graph
            if comod_a or comod_b:
                F += 1
            # node pairs sharing a module in both graphs
            if comod_a and comod_b:
                f += 1

    m1 = np.max(ma)
    m2 = np.max(mb)
    eta = []
    gamma = []
    for i in range(m1):
        eta.append(np.size(np.where(ma == i + 1)))
    for i in range(m2):
        gamma.append(np.size(np.where(mb == i + 1)))

    scale, conscale = (0, ) * 2
    for h in eta:
        for g in gamma:
            # print h,g
            conscale += (h * g) / (n * (h + g) - h * g)
            scale += (h * h * g * g) / (n**3 * (h + g) - n * h * g)

    print(m1, m2)
    # print(conscale)
    print(scale)
    return (f / F) / scale
Example No. 6
    def calculate_modules(self, thres):
        import bct
        thres_adj = self.adj.copy()
        thres_adj[thres_adj < thres] = 0
        self.verbose_msg('Threshold for modularity calculation: %s' % str(thres))
        modvec, _ = bct.modularity_und(thres_adj)
        self.modules = bct.ci2ls(modvec)
        self.nr_modules = len(self.modules)
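A self-contained sketch of the same steps on random data (assuming bct.ci2ls is exposed at the package level, as the method above implies): threshold the adjacency matrix, partition it, and convert the community-index vector into a list of per-module node lists.

import numpy as np
import bct

rng = np.random.default_rng(0)
adj = rng.random((20, 20))
adj = (adj + adj.T) / 2
np.fill_diagonal(adj, 0)

thres_adj = adj.copy()
thres_adj[thres_adj < 0.5] = 0           # absolute threshold

modvec, _ = bct.modularity_und(thres_adj)
modules = bct.ci2ls(modvec)              # community indices -> list of node lists
print(len(modules), [len(m) for m in modules])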
Example No. 7
def test_consensus():
    x = load_sample(thres=.38)
    ci = bct.consensus_und(x, .1, reps=50)
    print(np.max(ci), 4)
    assert np.max(ci) == 4
    _, q = bct.modularity_und(x, kci=ci)
    print(q, 0.27)
    assert np.allclose(q, 0.27, atol=.01)
Example No. 8
def test_consensus():
    x = load_sample(thres=.38)
    ci = bct.consensus_und(x, .1, reps=50)
    print(np.max(ci), 4)
    assert np.max(ci) == 4
    _, q = bct.modularity_und(x, kci=ci)
    print(q, 0.27)
    assert np.allclose(q, 0.27, atol=.01)
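The same consensus-clustering pattern as a hedged sketch on synthetic data (the tau of .1 and reps=50 mirror the test; the matrix here is random, so the resulting module count and Q are arbitrary):

import numpy as np
import bct

rng = np.random.default_rng(0)
w = rng.random((30, 30))
w = (w + w.T) / 2
np.fill_diagonal(w, 0)

ci = bct.consensus_und(w, .1, reps=50)
_, q = bct.modularity_und(w, kci=ci)
print(ci.max(), q)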
Example No. 9
def modularity_and_efficiency(data):
    mod_scores = []
    eff_scores = []
    for i, x in enumerate(data):
        matrix = mp.preprocess_matrix(x)
        mod_score = bct.modularity_und(matrix)[1]
        eff_score = bct.efficiency_wei(matrix)

        mod_scores.append(mod_score)
        eff_scores.append(eff_score)

    return mod_scores, eff_scores
Example No. 10
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        return bct.modularity_und(adj, kci=mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
Example No. 11
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        return bct.modularity_und(adj, kci=mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
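Hypothetical usage of do_opt on synthetic data (assuming the function above is in scope); the module vector comes from modularity_und, matching how mods is consumed by the module-aware options.

import numpy as np
import bct

rng = np.random.default_rng(1)
adj = rng.random((25, 25))
adj = (adj + adj.T) / 2
np.fill_diagonal(adj, 0)

mods, _ = bct.modularity_und(adj)
print(do_opt(adj, mods, 'global efficiency'))
print(do_opt(adj, mods, 'participation coefficient'))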
Example No. 12
def comodularity_und(a1, a2):
    '''
    Returns the comodularity, an experimental measure I am developing.
    The comodularity evaluates the correspondence between two community
    structures A and B.  Let F be the set of nodes that are co-modular (in the
    same module) in at least one of these community structures.  Let f be the
    set of nodes that are co-modular in both of these community structures.
    The comodularity is |f|/|F|

    This is actually very similar to the Jaccard index, which turns out not
    to be a terribly useful property. At high similarity the variation
    of information is better. It may be that the degenerate cross modularity
    is even better though.
    '''

    ma, qa = bct.modularity_und(a1)
    mb, qb = bct.modularity_und(a2)

    n = len(ma)
    if len(mb) != n:
        raise bct.BCTParamError('Comodularity must be done on equally sized '
                                'matrices')

    E, F, f, G, g, H, h = (0,) * 7

    for e1 in range(n):
        for e2 in range(n):
            if e2 >= e1:
                continue

            # node pairs
            comod_a = ma[e1] == ma[e2]
            comod_b = mb[e1] == mb[e2]

            # node pairs sharing a module in at least one graph
            if comod_a or comod_b:
                F += 1
            # node pairs sharing a module in both graphs
            if comod_a and comod_b:
                f += 1

            # edges in either graph common to any module
            if a1[e1, e2] != 0 or a2[e1, e2] != 0:
                # edges that exist in at least one graph which prepend a shared
                # module in at least one graph:
                # EXTREMELY NOT USEFUL SINCE THE SHARED MODULE MIGHT BE THE OTHER
                # GRAPH WITH NO EDGE!
                if comod_a or comod_b:
                    G += 1
                # edges that exist in at least one graph which prepend a shared
                # module in both graphs:
                if comod_a and comod_b:
                    g += 1

                # edges that exist at all
                E += 1

            # edges common to a module in both graphs
            if a1[e1, e2] != 0 and a2[e1, e2] != 0:
                # edges that exist in both graphs which prepend a shared module
                # in at least one graph
                if comod_a or comod_b:
                    H += 1
                # edges that exist in both graphs which prepend a shared module
                # in both graphs
                if comod_a and comod_b:
                    h += 1

    m1 = np.max(ma)
    m2 = np.max(mb)
    P = m1 + m2 - 1

    # print f,F
    print(m1, m2)
    print('f/F', f / F)
    print('(f/F)*p', f * P / F)
    print('g/E', g / E)
    print('(g/E)*p', g * P / E)
    print('h/E', h / E)
    print('(h/E)*p', h * P / E)
    print('h/H', h / H)
    print('(h/H)*p', h * P / H)
    print('q1, q2', qa, qb)
    # print 'f/F*sqrt(qa*qb)', f*np.sqrt(qa*qb)/F
    return f / F
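Hypothetical usage of comodularity_und above on two noisy copies of the same random network (assuming the function and its numpy/bct imports are in scope); with dense symmetric inputs every node pair has an edge in both graphs, so the printed ratios do not divide by zero.

import numpy as np

rng = np.random.default_rng(7)
base = rng.random((30, 30))
a1 = (base + base.T) / 2
a2 = a1 + 0.05 * rng.random((30, 30))
a2 = (a2 + a2.T) / 2
np.fill_diagonal(a1, 0)
np.fill_diagonal(a2, 0)

print(comodularity_und(a1, a2))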
Example No. 13
import numpy as np
import bct as BCT
import sys

fn = input()

print(fn)

M = np.loadtxt(fn)

Q_vec = np.zeros(len(np.arange(0.02, 0.21, 0.01)))

for i, th in enumerate(np.arange(0.02, 0.21, 0.01)):
    Q_vec[i] = BCT.modularity_und(BCT.threshold_proportional(M, th))[1]

Q = Q_vec.mean()

print(Q)
Example No. 14
import numpy as np
import bct as BCT
import sys

fn = input()

print(fn)

M = np.loadtxt(fn)

Q_vec = np.zeros(len(np.arange(0.02, 0.21, 0.01)))

for i, th in enumerate(np.arange(0.02, 0.21, 0.01)):
    Q_vec[i] = BCT.modularity_und(BCT.threshold_proportional(M, th))[1]

Q = Q_vec.mean()

print(Q)
Example No. 15
10 34
14 34
15 34
16 34
19 34
20 34
21 34
23 34
24 34
27 34
28 34
29 34
30 34
31 34
32 34
33 34
""".strip()

arr = np.zeros((34, 34), dtype=np.uint8)
for row in s.split('\n'):
    first, second = row.split(' ')
    arr[int(first) - 1, int(second) - 1] += 1

arr = bct.binarize(arr + arr.T)

np.random.seed(1991)

eff = bct.efficiency_bin(arr)
mod = bct.modularity_und(arr)
rand = bct.randmio_und_connected(arr, 5)
Example No. 16
def test_modularity_und():
    x = load_sample(thres=.4)
    _, q = bct.modularity_und(x)
    print(q)
    assert np.allclose(q, 0.24097717)
Example No. 17
def resample_network(A, n_perm, e_f, type_to_remove, final_electrodes):

    # type to remove : 1 for white matter, 0 for grey matter

    # e_f : following Erin + John convention, this is the fraction of nodes to
    # keep in the network (e.g., e_f=.2 means we remove 80% of the nodes)
    e_f = np.array(e_f)
    nch = A.shape[0]
    n_f = e_f.shape[0]

    # create sub dataframes for only the grey and white matter elec
    wm = final_electrodes[final_electrodes.iloc[:, 2] > 0]
    gm = final_electrodes[final_electrodes.iloc[:, 2] == 0]

    # numbers of each electrode type
    numWhite = wm.shape[0]
    numGrey = gm.shape[0]

    # fraction to remove
    if (type_to_remove == 1):
        e_n = numWhite - np.ceil(e_f * numWhite)
    else:
        e_n = numGrey - np.ceil(e_f * numGrey)

    # control centrality
    all_c_c = np.zeros((nch, n_f, n_perm))
    all_c_c[:] = np.nan
    cc_reg = np.zeros((nch, n_f, n_perm))
    cc_reg[:] = np.nan

    all_cc_norm = np.zeros((nch, n_f, n_perm))
    all_cc_norm[:] = np.nan

    #init node strengths
    all_ns = np.zeros((nch, n_f, n_perm))
    all_ns[:] = np.nan

    all_ns_norm = np.zeros((nch, n_f, n_perm))
    all_ns_norm[:] = np.nan

    # init betweenness centrality
    all_bc = np.zeros((nch, n_f, n_perm))
    all_bc[:] = np.nan

    all_bc_norm = np.zeros((nch, n_f, n_perm))
    all_bc_norm[:] = np.nan

    # synch
    all_sync = np.zeros((n_f, n_perm))
    all_sync[:] = np.nan

    all_sync_norm = np.zeros((n_f, n_perm))
    all_sync_norm[:] = np.nan

    # efficiency
    all_eff = np.zeros((n_f, n_perm))
    all_eff[:] = np.nan

    all_eff_norm = np.zeros((n_f, n_perm))
    all_eff_norm[:] = np.nan

    # eigenvector centrality
    all_ec = np.zeros((nch, n_f, n_perm))
    all_ec[:] = np.nan

    all_ec_norm = np.zeros((nch, n_f, n_perm))
    all_ec_norm[:] = np.nan

    # clustering coeff
    all_clust = np.zeros((nch, n_f, n_perm))
    all_clust[:] = np.nan

    all_clust_norm = np.zeros((nch, n_f, n_perm))
    all_clust_norm[:] = np.nan

    # participation coeff
    all_par = np.zeros((nch, n_f, n_perm))
    all_par[:] = np.nan

    # transitivity
    all_trans = np.zeros((n_f, n_perm))
    all_trans[:] = np.nan

    all_trans_norm = np.zeros((n_f, n_perm))
    all_trans_norm[:] = np.nan

    # edge bc
    all_edge_bc = []
    all_edge_bc_norm = []

    # get true participation
    Ci, ignore = bct.modularity_und(A)
    true_par = bct.participation_coef(A, Ci)
    avg_par_removed = np.zeros((n_f, n_perm))
    avg_par_removed[:] = np.nan

    # get the true bc
    true_bc = betweenness_centrality(A)
    avg_bc_removed = np.zeros((n_f, n_perm))
    avg_bc_removed[:] = np.nan

    # loop over all removal fractions and permutations
    for f in range(0, n_f):
        all_edge_bc_cur_fraction = []
        all_edge_bc_norm_cur_fraction = []
        for i_p in range(0, n_perm):

            if (i_p % 100 == 0):
                print(
                    "Doing permutation {0} for removal of fraction {1}".format(
                        i_p, f))

            # make a copy of the adjacency matrix (we will edit this each time)
            A_tmp = A.copy()

            # picking the nodes to remove
            if (type_to_remove == 1):
                to_remove = wm.sample(int(e_n[f])).iloc[:, 0]
            else:
                to_remove = gm.sample(int(e_n[f])).iloc[:, 0]

            # take these electrodes out of the adjacency matrix
            A_tmp = np.delete(A_tmp, to_remove, axis=0)
            A_tmp = np.delete(A_tmp, to_remove, axis=1)

            # create a new array to hold the identity of the channels
            ch_ids = np.arange(0, nch)
            ch_ids = np.delete(ch_ids, to_remove)

            # get the new metrics from A_tmp
            r = get_true_network_metrics(A_tmp)

            # edge metric
            all_edge_bc_cur_fraction.append(r['edge_bc'])
            all_edge_bc_norm_cur_fraction.append(r['edge_bc_norm'])
            # populate the nodal measures
            for i in range(0, ch_ids.shape[0]):
                all_c_c[ch_ids[i], f, i_p] = r['control_centrality'][i]
                all_ns[ch_ids[i], f, i_p] = r['ns'][i]
                all_bc[ch_ids[i], f, i_p] = r['bc'][i]
                all_par[ch_ids[i], f, i_p] = r['par'][i]
                all_ec[ch_ids[i], f, i_p] = r['ec'][i]
                all_clust[ch_ids[i], f, i_p] = r['clust'][i]

                all_cc_norm[ch_ids[i], f,
                            i_p] = r['control_centrality_norm'][i]
                all_ns_norm[ch_ids[i], f, i_p] = r['ns_norm'][i]
                all_bc_norm[ch_ids[i], f, i_p] = r['bc_norm'][i]
                all_ec_norm[ch_ids[i], f, i_p] = r['ec_norm'][i]
                all_clust_norm[ch_ids[i], f, i_p] = r['clust_norm'][i]

            # populate the global measures
            all_sync[f, i_p] = r['sync']
            all_sync_norm[f, i_p] = r['sync_norm']

            all_eff[f, i_p] = r['eff']
            all_eff_norm[f, i_p] = r['eff_norm']

            all_trans[f, i_p] = r['trans']
            all_trans_norm[f, i_p] = r['trans_norm']

        all_edge_bc.append(all_edge_bc_cur_fraction)
        all_edge_bc_norm.append(all_edge_bc_norm_cur_fraction)

    # construct the output dictionary from a resampling

    #nodal
    results = {}
    results['control_centrality'] = all_c_c
    results['control_centrality_norm'] = all_cc_norm

    # global measure
    results['sync'] = all_sync
    results['sync_norm'] = all_sync_norm

    # nodal
    results['bc'] = all_bc
    results['bc_norm'] = all_bc_norm

    # nodal
    results['ec'] = all_ec
    results['ec_norm'] = all_ec_norm

    # nodal
    results['clust'] = all_clust
    results['clust_norm'] = all_clust_norm

    # nodal
    results['ns'] = all_ns
    results['ns_norm'] = all_ns_norm

    # global
    results['eff'] = all_eff
    results['eff_norm'] = all_eff_norm

    # global
    results['trans'] = all_trans
    results['trans_norm'] = all_trans_norm

    # nodal
    results['par'] = all_par

    #edge
    results['edge_bc'] = all_edge_bc
    results['edge_bc_norm'] = all_edge_bc_norm

    return (results)
Example No. 18
threshVal = float(input("Enter threshold: "))

#testDir = '/Users/heiland/Documents/Heiland/BioVis14/contest_subject_networks/'
#fname = 'U050E1IO_adjacency_matrix_pcc.txt'
fname = '1X4I9DF0_adjacency_matrix_pcc.txt'
#d = np.genfromtxt(testDir + fname, delimiter='\t')
try:
    A = np.genfromtxt(fname, delimiter='\t')
except OSError:
    print("Error opening " + fname)

A = bct.threshold_absolute(A, threshVal)

# Calculate the network's modularity
[Ci, Q] = bct.modularity_und(A)

print(Ci)
for i in range(len(Ci)):
    for j in range(i + 1, len(Ci)):
        #    print(i, j)
        if Ci[i] > Ci[j]:
            temp = Ci[i]
            Ci[i] = Ci[j]
            Ci[j] = temp
            print(str(i) + ' <-> ' + str(j))
            A[[i, j]] = A[[j, i]]  # swap rows
            A[:, [i, j]] = A[:, [j, i]]  # swap cols
print(Ci)

#Ci=array([1, 1, 3, 3, 1, 3, 3, 3, 1, 3, 3, 1, 1, 1, 1, 1, 3, 3, 4, 3, 1, 3, 3, ...
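The swap loop above is effectively a bubble sort of nodes by module label. An equivalent one-shot permutation (hypothetical, not part of the original script, reusing the A and Ci computed above) uses np.argsort:

import numpy as np

order = np.argsort(Ci, kind='stable')   # node indices grouped by module label
A_sorted = A[np.ix_(order, order)]      # reorder rows and columns together
Ci_sorted = Ci[order]
print(Ci_sorted)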
Example No. 19
root_fname = sys.argv[1]
#nw_fname = sys.argv[2]
nw_fname = "1X4I9DF0_adjacency_matrix_pcc.txt"
threshold = float(sys.argv[2])
print('threshold=', threshold)

adjMtx = np.genfromtxt(nw_fname, delimiter='\t')

"""Rubinov,Sporns 2010:  all self-connections or negative connections
(such as functional anticorrelations) must currently be removed
from the networks prior to analysis"""

adjMtx = bct.threshold_absolute(adjMtx, threshold)

# NB! be sure to put before masking the matrix
[Ci, Q] = bct.modularity_und(adjMtx)

roi_legend = 'roi_legend.txt'
try:
    fp_roi = open(roi_legend, 'r')
except OSError:
    print("Error opening " + roi_legend)

# Create a dictionary mapping ID --> RGB
vc = {}  # vertex color dictionary
count = 0
# colors (rgb) for modules (Q: what's the max # of modules we'll have??)
# rf. http://www.rapidtables.com/web/color/RGB_Color.htm
# Red,Green,Yellow,Blue,...
module_rgb_dict = {1:'255 0 0', 2: '0 255 0', 3: '255 255 0', 4: '0 0 255', 5: '255 0 255', 6: '51 255 255'}
count = 0
Example No. 20
def graph_estimates(cm, th):

    #dictionary for storing our results
    d = OrderedDict()

    #thresholding moved here for other matrices than MatLab matrices
    #removes negative weights
    cm = bct.threshold_absolute(cm, 0.0)

    cm = threshold_connected(cm, th)

    
    #for binarizing the connectivity matrices, 
    #we work with weighted so this is turned off
    #bin_cm = bct.binarize(cm)
    
    #invert the connectivity for computing shortest paths
    cm_inv = bct.invert(cm)

    #modularity_und is found in modularity.py
    modularity_und = bct.modularity_und(cm)

    #the community_affiliation vector that gets input to some of the functions
    community_affiliation = modularity_und[0]
    
    #distance_wei and charpath is found in distance.py
    distance_wei = bct.distance_wei(cm_inv)
    charpath = bct.charpath(distance_wei[0], False, False)

    #clustering_coef_wu is found in clustering.py
    clustering_coef_wu = bct.clustering_coef_wu(cm)
    avg_clustering_coef_wu = np.mean(clustering_coef_wu)


    #assortativity_wei is found in core.py
    d['assortativity_wei-r'] = bct.assortativity_wei(cm, flag=0)

    #just taking the average of clustering_coef_wu
    d['avg_clustering_coef_wu:C'] = avg_clustering_coef_wu

    d['charpath-lambda'] = charpath[0]
    #d['charpath-efficiency'] = charpath[1]   
    #d['charpath-ecc'] = charpath[2]           
    #d['charpath-radius'] = charpath[3]
    #d['charpath-diameter'] = charpath[4]

    d['clustering_coef_wu-C'] = clustering_coef_wu


    d['efficiency_wei-Eglob'] = bct.efficiency_wei(cm)
    #d['efficiency_wei-Eloc'] = bct.efficiency_wei(cm, True)

    #d['modularity_und-ci'] = modularity_und[0]
    d['modularity_und-Q'] = modularity_und[1]

    d['small_worldness:S'] = compute_small_worldness(cm,
                                                     avg_clustering_coef_wu,
                                                     charpath[0])

   
    # transitivity_wu can be found in clustering.py
    d['transitivity_wu-T'] = bct.transitivity_wu(cm)


    #EXAMPLES for local measures and binary measures. Comment in to use. 

    #VECTOR MEASURES
    #d['betweenness_wei-BC'] = bct.betweenness_wei(cm_inv)
    # d['module_degree_zscore-Z'] = bct.module_degree_zscore(cm, community_affiliation)
    #d['degrees_und-deg'] = bct.degrees_und(cm)
    #d['charpath-ecc'] = charpath[2]


    #BINARIES
    # d['clustering_coef_bu-C'] = bct.clustering_coef_bu(bin_cm)
    # d['efficiency_bin-Eglob'] = bct.efficiency_bin(bin_cm)
    # d['efficiency_bin-Eloc'] = bct.efficiency_bin(bin_cm, True)
    #  d['modularity_und_bin-ci'] = modularity_und_bin[0]
    #  d['modularity_und_bin-Q'] = modularity_und_bin[1]
    # d['transitivity_bu-T'] = bct.transitivity_bu(bin_cm)
    #  d['betweenness_bin-BC'] = bct.betweenness_bin(bin_cm)
    #  modularity_und_bin = bct.modularity_und(bin_cm)
    #d['participation_coef'] = bct.participation_coef(cm, community_affiliation)


    ######## charpath giving problems with ecc, radius and diameter
    # np.seterr(invalid='ignore')


    return d
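A minimal sketch of the shortest-path step used above, on synthetic data: the weights are converted to connection lengths with bct.invert before distance_wei, and charpath then yields the characteristic path length (lambda).

import numpy as np
import bct

rng = np.random.default_rng(5)
cm = rng.random((20, 20))
cm = (cm + cm.T) / 2
np.fill_diagonal(cm, 0)

cm_inv = bct.invert(cm)          # 1/w: stronger connections become shorter lengths
D, _ = bct.distance_wei(cm_inv)
lam = bct.charpath(D, False, False)[0]
print(lam)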
Example No. 21
def entropic_similarity(a1, a2):
    ma, _ = bct.modularity_und(a1)
    mb, _ = bct.modularity_und(a2)

    vi, _ = bct.partition_distance(ma, mb)
    return 1 - vi
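Hypothetical usage of entropic_similarity above (assuming it is in scope): because partition_distance returns a normalized variation of information in [0, 1], identical graphs give a similarity of 1.0 and unrelated graphs typically give something lower.

import numpy as np

rng = np.random.default_rng(3)
a = rng.random((30, 30))
a = (a + a.T) / 2
b = rng.random((30, 30))
b = (b + b.T) / 2
np.fill_diagonal(a, 0)
np.fill_diagonal(b, 0)

print(entropic_similarity(a, a))   # identical input -> 1.0
print(entropic_similarity(a, b))   # unrelated input -> typically lower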
Example No. 22
#connected component:
# G = nx.erdos_renyi_graph(100, 0.03, seed=seed)
# Adj = np.asarray(nx.to_numpy_matrix(G))
# a = bct.get_components(Adj)
# print a[0], len(a[0]) #component number
# print a[1], len(a[1]) #vector of component sizes

# community detection:
# ml = bct.community_louvain(Adj, 1, B='modularity')  # gamma=1 for 4 cluster
# print ml[0] #communities
# print ml[1] #modularity
# print max(ml[0]) #n_community
# print bct.transitivity_bu(Adj)
# print np.mean(bct.clustering_coef_bu(Adj))

mod = bct.modularity_und(Adj, gamma=1)
# print mod[0]
# print mod[1]  # maximized modularity metric

#shortest path
# dist = bct.distance_bin(Adj)
# np.savetxt('distance.txt', dist, fmt='%d')
#characteristic path length
# ch = bct.charpath(dist)
# print  ch[0] # lambda
# print ch[1] #efficiency
# # print ch[2] #ecc
# print ch[3] #radius
# print ch[4] #diameter

# ecc = np.max(dist, axis=1)
def process(data):
    ci, q = bct.modularity_und(data, gamma=1)
    return ci  # TODO: report q?
Example No. 24
def test_modularity_und():
    x = load_sample(thres=.4)
    _, q = bct.modularity_und(x)
    print(q)
    assert np.allclose(q, 0.24097717)
Example No. 25
root_fname = sys.argv[1]
#nw_fname = sys.argv[2]
nw_fname = "1X4I9DF0_adjacency_matrix_pcc.txt"
threshold = float(sys.argv[2])
print('threshold=', threshold)

adjMtx = np.genfromtxt(nw_fname, delimiter='\t')
"""Rubinov,Sporns 2010:  all self-connections or negative connections
(such as functional anticorrelations) must currently be removed
from the networks prior to analysis"""

adjMtx = bct.threshold_absolute(adjMtx, threshold)

# NB! be sure to put before masking the matrix
[Ci, Q] = bct.modularity_und(adjMtx)

roi_legend = 'roi_legend.txt'
try:
    fp_roi = open(roi_legend, 'r')
except OSError:
    print("Error opening " + roi_legend)

# Create a dictionary mapping ID --> RGB
vc = {}  # vertex color dictionary
count = 0
# colors (rgb) for modules (Q: what's the max # of modules we'll have??)
# rf. http://www.rapidtables.com/web/color/RGB_Color.htm
# Red,Green,Yellow,Blue,...
module_rgb_dict = {
    1: '255 0 0',
Example No. 26
def entropic_similarity(a1, a2):
    ma, _ = bct.modularity_und(a1)
    mb, _ = bct.modularity_und(a2)

    vi, _ = bct.partition_distance(ma, mb)
    return 1 - vi
Example No. 27
threshVal = float(input("Enter threshold: "))

#testDir = '/Users/heiland/Documents/Heiland/BioVis14/contest_subject_networks/'
#fname = 'U050E1IO_adjacency_matrix_pcc.txt'
fname = '1X4I9DF0_adjacency_matrix_pcc.txt'
#d = np.genfromtxt(testDir + fname, delimiter='\t')
try:
    A = np.genfromtxt(fname, delimiter='\t')
except OSError:
    print("Error opening " + fname)

A = bct.threshold_absolute(A, threshVal)

# Calculate the network's modularity
[Ci, Q] = bct.modularity_und(A)

print(Ci)
for i in range(len(Ci)):
    for j in range(i + 1, len(Ci)):
        #    print(i, j)
        if Ci[i] > Ci[j]:
            temp = Ci[i]
            Ci[i] = Ci[j]
            Ci[j] = temp
            print(str(i) + ' <-> ' + str(j))
            A[[i, j]] = A[[j, i]]  # swap rows
            A[:, [i, j]] = A[:, [j, i]]  # swap cols
print(Ci)

#Ci=array([1, 1, 3, 3, 1, 3, 3, 3, 1, 3, 3, 1, 1, 1, 1, 1, 3, 3, 4, 3, 1, 3, 3, ...
Example No. 28
def get_true_network_metrics(A):

    #control centrality
    c_c = control_centrality(A)

    cc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        cc_fake[i] = np.mean(control_centrality(generate_fake_graph(A)))

    m_cc_fake = np.mean(cc_fake)
    cc_norm = c_c / m_cc_fake

    # Get identity of node with lowest control centrality
    min_cc_true = np.where(c_c == np.amin(c_c))[0]

    # get synchronizability
    sync = synchronizability(A)

    # normalized sync
    sync_fake = np.zeros((100, 1))
    for i in range(0, 100):
        sync_fake[i] = synchronizability(generate_fake_graph(A))

    m_sync_fake = np.mean(sync_fake)
    sync_norm = sync / m_sync_fake

    # get betweenness centrality
    bc = betweenness_centrality(A)
    bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        bc_fake[i] = np.mean(betweenness_centrality(generate_fake_graph(A)))

    m_bc_fake = np.mean(bc_fake)
    bc_norm = bc / m_bc_fake

    # Get identity of node with max bc
    max_bc_true = np.where(bc == np.amax(bc))[0]

    # get eigenvector centrality
    ec = bct.eigenvector_centrality_und(A)
    ec_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ec_fake[i] = np.mean(
            bct.eigenvector_centrality_und(generate_fake_graph(A)))

    m_ec_fake = np.mean(ec_fake)
    ec_norm = ec / m_ec_fake

    # Get identity of node with max ec
    max_ec_true = np.where(ec == np.amax(ec))[0]

    # get edge betweenness centrality
    edge_bc, ignore = bct.edge_betweenness_wei(A)
    edge_bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        edge_bc_fake[i] = np.mean(
            bct.edge_betweenness_wei(generate_fake_graph(A))[0])
    m_edge_bc_fake = np.mean(edge_bc_fake)
    edge_bc_norm = edge_bc / m_edge_bc_fake

    # get clustering coeff
    clust = bct.clustering_coef_wu(A)
    clust_fake = np.zeros((100, 1))
    for i in range(0, 100):
        clust_fake[i] = np.mean(bct.clustering_coef_wu(generate_fake_graph(A)))

    m_clust_fake = np.mean(clust_fake)
    clust_norm = clust / m_clust_fake

    # Get identity of node with max clust
    max_clust_true = np.where(clust == np.amax(clust))[0]

    # get node strength
    ns = node_strength(A)
    ns_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ns_fake[i] = np.mean(node_strength(generate_fake_graph(A)))

    m_ns_fake = np.mean(ns_fake)
    ns_norm = ns / m_ns_fake

    # Get identity of node with max ns
    max_ns_true = np.where(ns == np.amax(ns))[0]

    # Get true participation and efficiency
    Ci, ignore = bct.modularity_und(A)
    par = bct.participation_coef(A, Ci)

    eff = bct.efficiency_wei(A, 0)
    eff_fake = np.zeros((100, 1))
    for i in range(0, 100):
        eff_fake[i] = (bct.efficiency_wei(generate_fake_graph(A)))

    m_eff_fake = np.mean(eff_fake)
    eff_norm = eff / m_eff_fake

    # Get true transitivity
    trans = bct.transitivity_wu(A)
    trans_fake = np.zeros((100, 1))
    for i in range(0, 100):
        trans_fake[i] = (bct.transitivity_wu(generate_fake_graph(A)))

    m_trans_fake = np.mean(trans_fake)
    trans_norm = trans / m_trans_fake

    # store output results in a dictionary
    #nodal
    results = {}
    results['control_centrality'] = c_c
    results['control_centrality_norm'] = cc_norm
    results['min_cc_node'] = min_cc_true

    # global measure
    results['sync'] = sync
    results['sync_norm'] = sync_norm

    # nodal
    results['bc'] = bc
    results['bc_norm'] = bc_norm
    results['max_bc_node'] = max_bc_true

    # nodal
    results['ec'] = ec
    results['ec_norm'] = ec_norm
    results['max_ec_node'] = max_ec_true

    # nodal
    results['clust'] = clust
    results['clust_norm'] = clust_norm
    results['max_clust_node'] = max_clust_true

    # nodal
    results['ns'] = ns
    results['ns_norm'] = ns_norm
    results['max_ns_node'] = max_ns_true

    # global
    results['eff'] = eff
    results['eff_norm'] = eff_norm

    # global
    results['trans'] = trans
    results['trans_norm'] = trans_norm

    # nodal
    results['par'] = par

    # edge
    results['edge_bc'] = edge_bc
    results['edge_bc_norm'] = edge_bc_norm

    return (results)
Example No. 29
def comodularity_und(a1, a2):
    '''
    Returns the comodularity, an experimental measure I am developing.
    The comodularity evaluates the correspondence between two community
    structures A and B.  Let F be the set of nodes that are co-modular (in the
    same module) in at least one of these community structures.  Let f be the
    set of nodes that are co-modular in both of these community structures.
    The comodularity is |f|/|F|

    This is actually very similar to the Jaccard index, which turns out not
    to be a terribly useful property. At high similarity the variation
    of information is better. It may be that the degenerate cross modularity
    is even better though.
    '''

    ma, qa = bct.modularity_und(a1)
    mb, qb = bct.modularity_und(a2)

    n = len(ma)
    if len(mb) != n:
        raise bct.BCTParamError('Comodularity must be done on equally sized '
                                'matrices')

    E, F, f, G, g, H, h = (0, ) * 7

    for e1 in range(n):
        for e2 in range(n):
            if e2 >= e1:
                continue

            # node pairs
            comod_a = ma[e1] == ma[e2]
            comod_b = mb[e1] == mb[e2]

            # node pairs sharing a module in at least one graph
            if comod_a or comod_b:
                F += 1
            # node pairs sharing a module in both graphs
            if comod_a and comod_b:
                f += 1

            # edges in either graph common to any module
            if a1[e1, e2] != 0 or a2[e1, e2] != 0:
                # edges that exist in at least one graph which prepend a shared
                # module in at least one graph:
                # EXTREMELY NOT USEFUL SINCE THE SHARED MODULE MIGHT BE THE OTHER
                # GRAPH WITH NO EDGE!
                if comod_a or comod_b:
                    G += 1
                # edges that exist in at least one graph which prepend a shared
                # module in both graphs:
                if comod_a and comod_b:
                    g += 1

                # edges that exist at all
                E += 1

            # edges common to a module in both graphs
            if a1[e1, e2] != 0 and a2[e1, e2] != 0:
                # edges that exist in both graphs which prepend a shared module
                # in at least one graph
                if comod_a or comod_b:
                    H += 1
                # edges that exist in both graphs which prepend a shared module
                # in both graphs
                if comod_a and comod_b:
                    h += 1

    m1 = np.max(ma)
    m2 = np.max(mb)
    P = m1 + m2 - 1

    # print f,F
    print(m1, m2)
    print('f/F', f / F)
    print('(f/F)*p', f * P / F)
    print('g/E', g / E)
    print('(g/E)*p', g * P / E)
    print('h/E', h / E)
    print('(h/E)*p', h * P / E)
    print('h/H', h / H)
    print('(h/H)*p', h * P / H)
    print('q1, q2', qa, qb)
    # print 'f/F*sqrt(qa*qb)', f*np.sqrt(qa*qb)/F
    return f / F