Example #1
def actorAssortativity(graph):
    aaGraph = actorActorGraph(graph)
    raceAssortativity = nx.attribute_assortativity_coefficient(aaGraph, 'race')
    blackWhiteAAGraph = actorActorGraph(getBlackWhiteGraph(graph))
    blackWhiteAssortativity = nx.attribute_assortativity_coefficient(
        blackWhiteAAGraph, 'race')
    genderAssortativity = nx.attribute_assortativity_coefficient(
        aaGraph, 'gender')
    return (raceAssortativity, blackWhiteAssortativity, genderAssortativity)
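A quick aside (not part of the example above): nx.attribute_assortativity_coefficient returns +1 when every edge joins nodes sharing the same attribute value, and it falls as cross-group edges take over. A minimal self-contained sketch:

import networkx as nx

G = nx.Graph()
G.add_nodes_from([0, 1], gender='male')
G.add_nodes_from([2, 3], gender='female')
G.add_edges_from([(0, 1), (2, 3)])  # only within-group edges
print(nx.attribute_assortativity_coefficient(G, 'gender'))  # 1.0

G.add_edge(1, 2)  # one cross-group edge pulls the coefficient below 1
print(nx.attribute_assortativity_coefficient(G, 'gender'))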
Example #2
def intra_organizational():
    edges = []
    with open('1_3.txt') as f:
        for line in f:
            a = line.split()
            edges.append((int(a[0]), int(a[1])))
    G1 = nx.Graph()
    G1.add_edges_from(edges)
    with open('gender.txt') as f:
        for line in f:
            a = line.split()
            b = []
            for item in a:
                b.append('male' if item == '1' else 'female')
            gender = dict(zip(range(1, len(G1) + 1), b))
            nx.set_node_attributes(G1, gender, 'att1')

    with open('gender.txt') as f:
        for line in f:
            a = line.split()
            b = []
            for item in a:
                b.append('male' if item == '2' else 'female')
            gender = dict(zip(range(1, len(G1) + 1), b))
            nx.set_node_attributes(G1, gender, 'att2')

    with open('gender.txt') as f:
        for line in f:
            a = line.split()
            b = []
            for item in a:
                b.append('male' if item == '3' else 'female')
            gender = dict(zip(range(1, len(G1) + 1), b))
            nx.set_node_attributes(G1, gender, 'att3')
    with open('gender.txt') as f:
        for line in f:
            a = line.split()
            b = []
            for item in a:
                b.append('male' if item == '4' else 'female')
            gender = dict(zip(range(1, len(G1) + 1), b))
            nx.set_node_attributes(G1, gender, 'att4')

    nx.write_gpickle(G1, '2.gpickle')

    print(nx.attribute_assortativity_coefficient(G1, 'att1'))
    print(nx.attribute_assortativity_coefficient(G1, 'att2'))
    print(nx.attribute_assortativity_coefficient(G1, 'att3'))
    print(nx.attribute_assortativity_coefficient(G1, 'att4'))
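A compatibility note: nx.write_gpickle and nx.read_gpickle (also used in Examples #18 and #31) were removed in NetworkX 3.0. A drop-in sketch using the standard library instead:

import pickle

with open('2.gpickle', 'wb') as f:
    pickle.dump(G1, f)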
Example #3
File: analyze.py Project: ml-lab/NovelNets
def analyze_assortativity(G):
    # Degree Assortativity
    result = nx.degree_assortativity_coefficient(G, weight="weight")
    print("Degree Assortativity Assorativity: {}".format(result))
    print()

    # Gender Assortativity
    result = nx.attribute_assortativity_coefficient(G, "gender")
    print("Gender Assortativity: {}".format(result))
    print()

    # Association Assortativity
    result = nx.attribute_assortativity_coefficient(G, "association")
    print("Association Assortativity: {}".format(result))
    print()
Example #4
def analytic_exponent(G, maj_fraction):

    # group a is the majority, group b the minority
    assortativity = nx.attribute_assortativity_coefficient(G,'gender')
    daa = 0.5*(assortativity+1)
    dab = 1 - daa

    pa = maj_fraction
    pb = 1 - pa

    A = -1
    B = 2 + 2*pa*daa - pa*dab - dab*pb + 2*dab
    C = -4*pa*daa + 2*pa*dab - 4*dab - 2*daa*dab*pa + 2*(dab**2)*pb
    D = 4*daa*dab*pa

    p = [A, B, C, D]
    ca = np.roots(p)[1]

    qa = (daa*pa*(2-ca) + ca *dab*pb) / (ca*(2-ca))
    qb = 1 - (pb*(1-qa))/(2-2*qa-pa)

    exponent_a = (1+(1./qa))
    exponent_b = (1+(1./qb))

    return exponent_a , exponent_b
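A hedged usage sketch for analytic_exponent, assuming numpy is imported as np and the graph's nodes carry a 'gender' attribute; the toy graph and the 0.7 majority fraction are purely illustrative:

import networkx as nx

G = nx.barabasi_albert_graph(200, 2, seed=1)
for n in G.nodes():
    # label roughly 70% of the nodes as the majority group (illustrative only)
    G.nodes[n]['gender'] = 'a' if n % 10 < 7 else 'b'

exp_a, exp_b = analytic_exponent(G, maj_fraction=0.7)
print(exp_a, exp_b)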
Example #5
def getAssortativityCoefficient(outputDir, scenarioName, seed, ageLim):
    G = nx.Graph()
    householdFile = os.path.join(outputDir, scenarioName + "_" + str(seed),
                                 "households.csv")
    with open(householdFile) as csvfile:
        reader = csv.DictReader(csvfile)
        personID = 0
        for row in reader:
            personsImmunity = (row["susceptible"])[1:-1].split(", ")
            personsImmunity = [int(x) for x in personsImmunity]
            personAges = (row["ages"])[1:-1].split(", ")
            personAges = [int(x) for x in personAges]
            personIDs = []
            for p_i in range(len(personsImmunity)):
                # Add person node to graph
                if personAges[p_i] < ageLim:
                    G.add_node(personID, susceptible=personsImmunity[p_i])
                    personIDs.append(personID)
                    personID += 1
            for p1 in personIDs:
                for p2 in personIDs:
                    if p1 != p2:  # avoid self-loops within the household clique
                        G.add_edge(p1, p2)
    assortativityCoeff = nx.attribute_assortativity_coefficient(
        G, "susceptible")
    return assortativityCoeff
Example #6
def create_2colors_per_comm(G,layer1,layer2,layer3,slayer1,slayer2,pos,fig,subp=122):
    sl11=random.sample(slayer1,len(slayer1)/2)
    sl12=list(set(slayer1)-set(sl11))
    # print G.nodes(data=True)
    for i in sl11:
        G.add_node(i,color='y',attr_dict=G.node[i],fattr='1')
    for i in sl12:
        G.add_node(i,color='grey',attr_dict=G.node[i],fattr='2')
    sl21=random.sample(slayer2,len(slayer2)/2)
    sl22=list(set(slayer2)-set(sl21))
    for i in sl21:
        G.add_node(i,color='m',attr_dict=G.node[i],fattr='3')
    for i in sl22:
        G.add_node(i,color='c',attr_dict=G.node[i],fattr='4')
    fig.add_subplot(subp)
    # for i in G.nodes():
    #     if i in la
    # for i in G.nodes(data=True):
    #     print i
    rr= nx.attribute_assortativity_coefficient(G,'fattr')
    s_title='Discrete vertex attributes\nAssortativity_coef(4_attributes) = %.2f' %rr
    plt.title(s_title)#,{'size': '20'})
    nodecolor=[i[1]['color'] for i in G.nodes(data=True)]
    nx.draw_networkx_nodes(G,pos=pos,node_color=nodecolor,alpha=0.3)
    # nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[1]),node_color='gold')
    nx.draw_networkx_labels(G,pos=pos)
    nx.draw_networkx_edges(G,pos=pos,alpha=0.2)


    plt.axis("off")
Example #7
def plot_initial_bgraph(G, subp=121):
    fig = plt.figure(num=1, figsize=(16, 12))
    fig.add_subplot(subp)
    sets = bipartite.sets(G)
    pos = {}
    for i, v in enumerate(sets[0]):
        pos[v] = (0., i)
    for i, v in enumerate(sets[1]):
        pos[v] = (1, i)

    rr = nx.attribute_assortativity_coefficient(G, 'bipartite')
    s_title = 'Bipartite Graph\nAssortativity_coef(bipartition) = %.2f' % rr
    plt.title(s_title)  #,{'size': '20'})
    nx.draw_networkx_nodes(G,
                           pos=pos,
                           nodelist=list(sets[0]),
                           node_color='grey',
                           alpha=0.3)
    nx.draw_networkx_nodes(G,
                           pos=pos,
                           nodelist=list(sets[1]),
                           node_color='gold')
    nx.draw_networkx_labels(G, pos=pos)
    nx.draw_networkx_edges(G, pos=pos, alpha=0.2)
    plt.axis("off")
    # plt.show()
    return pos, fig
Example #8
def create_colors_per_comm(G,
                           layer1,
                           layer2,
                           layer3,
                           slayer1,
                           slayer2,
                           pos,
                           fig,
                           subp=(122)):
    for i in layer1:
        G.add_node(i, color='r', attr_dict=G.node[i], best_partition_comm='1')
    for i in layer2:
        G.add_node(i, color='g', attr_dict=G.node[i], best_partition_comm='2')
    for i in layer3:
        G.add_node(i, color='b', attr_dict=G.node[i], best_partition_comm='3')
    fig.add_subplot(subp)
    # for i in G.nodes(data=True):
    #     print i
    # rr=nx.attribute_assortativity_coefficient(G,'color')
    rra = nx.attribute_assortativity_coefficient(G, 'best_partition_comm')
    s_title = 'Community Partition\nAssortativity_coef(3_communities) = %.2f ' % (
        rra)
    plt.title(s_title)  #,{'size': '20'})
    nodecolor = [i[1]['color'] for i in G.nodes(data=True)]
    nx.draw_networkx_nodes(G, pos=pos, node_color=nodecolor, alpha=0.3)
    # nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[1]),node_color='gold')
    nx.draw_networkx_labels(G, pos=pos)
    nx.draw_networkx_edges(G, pos=pos, alpha=0.2)

    plt.axis("off")
Example #9
def batch_synthetic_generator_runner():
    # frac = np.linspace(0, 1, 21, endpoint=True) * 100
    frac = np.linspace(0, 100, 11, endpoint=True,
                       dtype=int)  # change it to increments of 10 for now
    names = [f'toy-comm-{f}' for f in frac]
    # names = ['karate', 'football', 'polbooks', 'eucore', 'flights', 'chess', 'polblogs']
    num_graphs = 5
    outdir = '/data/ssikdar/attributed-vrg/dumps'
    use_pickle = True
    save_snapshots = False
    shuffle = 'edges'

    args = []
    for name in names:
        # input_graph, attr_name = get_graph(name)
        input_graph, attr_name = nx.read_gexf(
            f'./input/shuffled/{shuffle}/{name}.gexf', node_type=int), 'block'
        name = f'{name}-{shuffle}'
        if attr_name == '':
            mix_dict, inp_deg_ast, inp_attr_ast = None, None, None
        else:
            mix_dict = get_mixing_dict(input_graph, attr_name=attr_name)
            inp_deg_ast = nx.degree_assortativity_coefficient(input_graph)
            inp_attr_ast = nx.attribute_assortativity_coefficient(
                input_graph, attr_name)

        for grammar_filename in glob(f'{outdir}/grammars/{name}/*'):
            grammar = load_pickle(grammar_filename)
            if isinstance(grammar, AttributedVRG):
                grammar_type = 'AVRG'
                fancy = True
                args.append((name, grammar, num_graphs, grammar_type, outdir,
                             mix_dict, attr_name, fancy, inp_deg_ast,
                             inp_attr_ast, use_pickle, save_snapshots))

                grammar_type = 'AVRG-greedy'
                # args.append((name, grammar, num_graphs, grammar_type, outdir, mix_dict, attr_name, fancy,
                #              inp_deg_ast, inp_attr_ast, use_pickle, save_snapshots))
                for alpha in (0, 0.5, 1):
                    args.append(
                        (name, grammar, num_graphs, grammar_type, outdir,
                         mix_dict, attr_name, fancy, inp_deg_ast, inp_attr_ast,
                         use_pickle, save_snapshots, alpha))
            else:
                assert isinstance(grammar, VRG)
                grammar_type = 'VRG'
                fancy = None
                args.append((name, grammar, num_graphs, grammar_type, outdir,
                             mix_dict, attr_name, fancy, inp_deg_ast,
                             inp_attr_ast, use_pickle, save_snapshots))

    parallel_async(func=generate_graphs, args=args, num_workers=10)
    # generate_graphs(grammar: Union[VRG, NCE, AttributedVRG], num_graphs: int, grammar_type: str, outdir: str = 'dumps',
    #                 mixing_dict: Union[None, Dict] = None, attr_name: Union[str, None] = None, fancy = None,
    #                 inp_deg_ast: float = None, inp_attr_ast: float = None)

    return
Example #10
def my_attribute_assortativity_coefficient(G, feature):
    '''
    GOAL:
    ----------------------------------------------
    INPUT:
    - 'G' is an instantiated of a networkX graph
    - 'feature' is a string of an attribute/feature you want to explore
    OUTPUT: the assortativity coefficient for a specific attribute
    '''
    return nx.attribute_assortativity_coefficient(G, feature)
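Calling the wrapper on a graph that already ships with node attributes (NetworkX's built-in karate club graph stores each member's faction under 'club'):

import networkx as nx

G = nx.karate_club_graph()
print(my_attribute_assortativity_coefficient(G, 'club'))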
Example #11
def get_assorts(G, labels):
    assorts = []

    for label in labels:
        assorts.append(nx.attribute_assortativity_coefficient(G, label))
        print(label, ': ', assorts[-1])

    assorts.append(nx.degree_assortativity_coefficient(G))

    return assorts
Example #12
def assortativity(G2):
    for v in G2.nodes:
        if v in group:  # 'group' is assumed to be defined in the enclosing scope
            G2.nodes[v]["subgroup"] = 1
        else:
            G2.nodes[v]["subgroup"] = 0
    assortativity_G2 = nx.degree_assortativity_coefficient(G2)
    print("Degree Assortativity:")
    print(assortativity_G2)
    attrassort_G2 = nx.attribute_assortativity_coefficient(G2, "subgroup")
    print("Subgroup Assortativity:")
    print(attrassort_G2)
Example #13
    def assortativitySummary(self, attributes_to_analyse=None):
        '''
        Returns a dictionary with the assortativity for each attribute of the network.
        '''
        if attributes_to_analyse is None:
            attributes_to_analyse = self.getNodeAttributeNames()

        attribute_assortativity = {}
        for attribute in attributes_to_analyse:
            assortativity = nx.attribute_assortativity_coefficient(self.graph, attribute)
            attribute_assortativity[attribute] = assortativity

        return attribute_assortativity
Example #14
def attribute_info():
    c_list, m_list, p_list = util.get_lists()
    cmp_list = util.append_arrays(c_list, m_list, p_list)

    interaction_types = ['mentions', 'replies', 'retweets']
    for interaction_type in interaction_types:
        edge_list = util.get_edge_list(interaction_type)
        g = create_graph_edge_weights(edge_list)
        cmp_g = create_graph_subset(g, cmp_list)
        add_types(cmp_g)
        print('{} Assortativity: '.format(interaction_type),
              nx.attribute_assortativity_coefficient(cmp_g, 'type'))
        print('{} Mixing: '.format(interaction_type),
              nx.attribute_mixing_dict(cmp_g, 'type', normalized=True))
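For reference, a small sketch of what nx.attribute_mixing_dict returns; each entry is the fraction of edge endpoints joining one attribute value to another (the values in the comment are worked out by hand for this toy graph):

import networkx as nx

G = nx.Graph([(0, 1), (1, 2)])
nx.set_node_attributes(G, {0: 'c', 1: 'm', 2: 'c'}, 'type')
print(nx.attribute_mixing_dict(G, 'type', normalized=True))
# {'c': {'m': 0.5}, 'm': {'c': 0.5}}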
Example #15
def analyze_graph(graph):
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.triangles.html
    # Triangles per nodes, we should analyse the average per graph
    triangles = np.average(list(nx.triangles(graph).values()))
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.transitivity.html
    transitivity = nx.transitivity(graph)
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.clustering.html
    # clustering = nx.clustering(graph, weight='weight').values()
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.average_clustering.html
    average_clustering = nx.average_clustering(graph,
                                               weight='weight',
                                               count_zeros=False)
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.bipartite.centrality.closeness_centrality.html
    closeness = nx.closeness_centrality(graph).values()
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.bipartite.centrality.betweenness_centrality.html
    betweenness = nx.betweenness_centrality(graph).values()
    # https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.assortativity.degree_assortativity_coefficient.html
    homophily = nx.degree_assortativity_coefficient(graph, weight='weight')
    # https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.assortativity.attribute_assortativity_coefficient.html
    # Homophily by citations
    homophily_citations = nx.attribute_assortativity_coefficient(
        graph, 'citations')
    # Homophily by university
    homophily_university = nx.attribute_assortativity_coefficient(
        graph, 'university')

    return {
        'triangles': np.round(triangles, 2),
        'transitivity': transitivity,
        # 'clustering': clustering,
        'average_clustering': average_clustering,
        'closeness': list(closeness),
        'betweenness': list(betweenness),
        'homophily': homophily,
        'homophily_citations': homophily_citations,
        'homophily_university': homophily_university
    }
Example #16
def update_weights_assortativity(pos,
                                 cands,
                                 G,
                                 hungarian,
                                 fitness,
                                 final_matched=[],
                                 cands2=[]):
    if cands2 == []: cands2 = cands
    asst = nx.attribute_assortativity_coefficient(G, 'att')
    if asst < 0:
        method = 2
    else:
        method = 1
    gender = nx.get_node_attributes(G, 'att')
    scores = {}
    for u in pos:
        neigh = G[u]
        m = 0
        f = 0
        for v in neigh:
            if gender[v] == 'male':
                m += 1
            elif gender[v] == 'female':
                f += 1
        scores.update({u: ((m, f))})

    edges = []
    for u in pos:
        (m, f) = scores[u]

        for v in cands:
            if (v, u) in fitness:  # fitness[(v, u)] == 1:
                if (cands2[v] == 'male'
                        and method == 1) or (cands2[v] == 'female'
                                             and method == 2):
                    if f != 0 or hungarian:
                        edges.append((u, v, f))
                elif (cands2[v] == 'male'
                      and method == 2) or (cands2[v] == 'female'
                                           and method == 1):
                    if m != 0 or hungarian:
                        edges.append((u, v, m))

    edge_weights = {}
    for (u, v, s) in edges:
        edge_weights.update({(u, v): s})
    return edge_weights
Example #17
def expAssortNew(bucket):
    bucket = str(bucket)
    folder = getFolder(bucket)
    r = []  # proportion of users in some list - experienced, in wiki, and so forth
    #slice=[]
    for file in os.listdir(folder):
        if file != '.DS_Store':
            path = folder + file
            G = getJsonNet(path)
            G = nx.Graph(G)
            if G.number_of_edges() > 0:
                r.append(nx.attribute_assortativity_coefficient(G, 'status'))
            else:
                r.append(-100)
    return r
Example #18
def expAssort(bucket):
    bucket = str(bucket)
    folder = getExpFolder(bucket)
    r = []  # proportion of users in some list - experienced, in wiki, and so forth
    #slice=[]
    for file in os.listdir(folder):
        if file != '.DS_Store':
            G = nx.read_gpickle(folder + file)
            G = nx.Graph(G)
            if G.number_of_edges() > 0:
                r.append(
                    nx.attribute_assortativity_coefficient(G, 'experienced'))
            else:
                r.append(-100)
    return r
Example #19
def batched_graphs_generator(basedir, clusterings, name, mus=None):
    # num_graphs = 5 if 'polblogs' in name else 10
    num_graphs = 10
    use_pickle = True
    save_snapshots = False
    attr_name = 'value'
    mus = [5]
    alpha = None
    input_graphs = read_batched_graphs(basedir=basedir, name=name)
    extract_types = ['mu_random']

    args = []
    for i, input_graph in enumerate(input_graphs):
        mix_dict = get_mixing_dict(input_graph, attr_name=attr_name)
        inp_deg_ast = nx.degree_assortativity_coefficient(input_graph)
        inp_attr_ast = nx.attribute_assortativity_coefficient(
            input_graph, attr_name)

        for grammar_filename in glob(
                f'{basedir}/output/grammars/{name}/*_{i}.pkl'):
            grammar = load_pickle(grammar_filename)
            if grammar.mu not in mus or grammar.clustering not in clusterings or grammar.extract_type not in extract_types:
                continue

            extract_type = grammar.extract_type.replace('_', '-')
            if isinstance(grammar, AttributedVRG):
                for gen_type, fancy in zip(('AVRG-regular', 'AVRG-fancy'),
                                           (False, True)):
                    graphs_filename = f'{basedir}/output/graphs/{name}/{gen_type}_{extract_type}_{grammar.clustering}_{grammar.mu}_{num_graphs}_{i}.pkl'
                    args.append((name, grammar, num_graphs, extract_type,
                                 gen_type, basedir, graphs_filename, mix_dict,
                                 attr_name, fancy, inp_deg_ast, inp_attr_ast,
                                 use_pickle, save_snapshots, alpha))

                for alpha, gen_type in zip(
                    (0, 0.5, 1),
                    ('AVRG-greedy-attr', 'AVRG-greedy-50', 'AVRG-greedy-deg')):
                    graphs_filename = f'{basedir}/output/graphs/{name}/{gen_type}_{extract_type}_{grammar.clustering}_{grammar.mu}_{num_graphs}_{i}.pkl'
                    args.append((name, grammar, num_graphs, extract_type,
                                 gen_type, basedir, graphs_filename, mix_dict,
                                 attr_name, fancy, inp_deg_ast, inp_attr_ast,
                                 use_pickle, save_snapshots, alpha))

    # random.shuffle(args)
    parallel_async(func=generate_graphs, args=args, num_workers=8)
    return
Example #20
def post_process(G, P_set, matched, F):

    score = 0
    for (u, v) in matched:
        if u in P_set:
            p = u
            c = v
        else:
            c = u
            p = v
        if (c, p) in F:
            score += float(F[(c, p)])

    support = []
    att_dic = nx.get_node_attributes(G, 'att')

    for p in list(P_set):
        c_att = att_dic[p]
        neigh_list = G.neighbors(p)
        temp = 0
        for neigh in neigh_list:
            att = att_dic[neigh]
            if att == c_att:
                temp += 1
        support.append(temp)
    '''
    if method ==1:
        ass = nx.attribute_assortativity_coefficient(G, 'att')
        div = ass
    elif method ==2:
        iu = IU.InformationUnfairness(G)
        div = iu
    elif method ==3:
        ec = EC.EchoChamber(G)
        div = ec
    '''

    average1, mean1 = support_group(G)
    return round(nx.attribute_assortativity_coefficient(G, 'att'),2),\
           round(IU.InformationUnfairness(G),2), \
           round(entropy(G),2),\
           round(score,2),\
           round(average1,2), round(mean1,2)
Example #21
def create_random_discrete_attributes(G,k):
    F=nx.Graph()
    for ed in G.edges():
        # print ed
        attr_dic=G.edge[ed[0]][ed[1]]
        F.add_edge(ed[0],ed[1],attr_dict=attr_dic)
    range_list=range(k)
    # print F.nodes(data=True)
    for nd in G.nodes():
        attr_dic=G.node[nd]
        if len(range_list)!=0:
            raa=random.choice(range_list)
            range_list.remove(raa)
        else:
            range_list=range(k)
            raa=random.choice(range_list)
        F.add_node(nd,attr_dict=attr_dic,discrete_attribute=raa)
    return F,nx.attribute_assortativity_coefficient(F,'discrete_attribute')
Example #22
def evaluate(G, P_set, F, matched=[], success=True):
    score=0
    if success:
        for (u, v) in matched:
            if u in P_set:
                p = u
                c = v
            else:
                c = u
                p = v
            if (c, p) in F:
                score += float(F[(c, p)])


    average1, mean1, communities = support_group_score(G)
    return round(nx.attribute_assortativity_coefficient(G, 'att'),2),\
           round(score,2),\
           round(average1,2)
Example #23
def create_random_discrete_attributes(G, k):
    F = nx.Graph()
    for ed in G.edges():
        # print ed
        attr_dic = G.edge[ed[0]][ed[1]]
        F.add_edge(ed[0], ed[1], attr_dict=attr_dic)
    range_list = range(k)
    # print F.nodes(data=True)
    for nd in G.nodes():
        attr_dic = G.node[nd]
        if len(range_list) != 0:
            raa = random.choice(range_list)
            range_list.remove(raa)
        else:
            range_list = range(k)
            raa = random.choice(range_list)
        F.add_node(nd, attr_dict=attr_dic, discrete_attribute=raa)
    return F, nx.attribute_assortativity_coefficient(F, 'discrete_attribute')
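This function (and its duplicate in Example #21) targets the NetworkX 1.x API (G.edge, G.node) and Python 2's list-returning range. A rough translation to the NetworkX 2.x API, offered as a sketch of the same idea:

import random
import networkx as nx

def create_random_discrete_attributes_nx2(G, k):
    F = nx.Graph()
    F.add_edges_from(G.edges(data=True))  # copy edges with their attributes
    pool = []
    for nd in G.nodes():
        if not pool:  # refill the label pool every k nodes
            pool = list(range(k))
        raa = random.choice(pool)
        pool.remove(raa)
        F.add_node(nd, **G.nodes[nd], discrete_attribute=raa)
    return F, nx.attribute_assortativity_coefficient(F, 'discrete_attribute')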
Example #24
def makeReport(netlist,genus,processes):
    ses = computeEdgeSEs(netlist) #get the mean and SE for all edge weights
    maxnodes = max([n.number_of_nodes() for n in netlist])
    basenet = [n for n in netlist if n.number_of_nodes() == maxnodes][0] #network with the most nodes, i.e. the one with all OTUs in the genus in it
    nx.set_edge_attributes(basenet,ses) #set the network to have the edge values be the mean across all bootstraps
    #Create a dictionary of statistics for the set of networks
    report = {'genus':genus}
    report['degree'] = diffStats_total(netlist,nx.degree,'edict',processes)
    report['mean_edge_weight'] = diffStats_total(netlist,nx.degree,'subgraph',processes)
    report['clustering'] = diffStats_total(netlist,nx.clustering,'list',processes)
    report['centrality'] = diffStats_total(netlist,nx.eigenvector_centrality_numpy,'ndict',processes)
    report['assortativity'] = [nx.attribute_assortativity_coefficient(net,'isCRISPR') \
                               for net in tqdm(netlist,desc='assortativity')]
    report['modularity'] = [modularity(net) for net in tqdm(netlist,desc='modularity')]
    if genus in toolonggenera:
        report['closeness_vitality'] = {'crispr_mean':-1, 'crispr_sem':-1, 'non-crispr_mean':-1, 'non-crispr_sem':-1, 'mw_stat':-1, 'mw_pval':-1}
    else:
        report['closeness_vitality'] = diffStats_total(netlist,nx.closeness_vitality,'ndict',processes)
    return basenet,report
Example #25
def plot_initial_bgraph(G,subp=121):
    fig=plt.figure(num=1,figsize=(16,12))
    fig.add_subplot(subp)
    sets=bipartite.sets(G)
    pos={}
    for i,v in enumerate(sets[0]):
        pos[v]= (0.,i)
    for i,v in enumerate(sets[1]):
        pos[v]= (1, i)

    rr=nx.attribute_assortativity_coefficient(G,'bipartite')
    s_title='Bipartite Graph\nAssortativity_coef(bipartition) = %.2f' %rr
    plt.title(s_title)#,{'size': '20'})
    nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[0]),node_color='grey',alpha=0.3)
    nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[1]),node_color='gold')
    nx.draw_networkx_labels(G,pos=pos)
    nx.draw_networkx_edges(G,pos=pos,alpha=0.2)
    plt.axis("off")
    # plt.show()  
    return pos,fig
Example #26
def compute_network_stats(G, inst):
    print 'RECIP:%.5f' % reciprocity(G)
    print 'MEAN_DEGREE:%.5f' % mean_degree(G)
    print 'MEAN_NB_DEGREE:%.5f' % mean_nb_degree(G)

    Gu = G.to_undirected()
    print 'AVG_CLUSTER:%.5f' % nx.average_clustering(Gu)
    print 'DEGREE_ASSORT:%.5f' % nx.degree_assortativity_coefficient(Gu)
    print 'MEAN_GEODESIC:%.5f' % nx.average_shortest_path_length(Gu)
    mg, d = mean_max_geodesic(Gu)
    print 'MEAN_GEODESIC:%.5f' % mg
    print 'DIAMETER:%d' % int(d)

    keep = []
    for n in Gu.nodes_iter():
        if n in inst:
            Gu.node[n]['region'] = inst[n]['Region']
            keep.append(n)
    
    H = Gu.subgraph(keep)
    print 'MOD_REGION:%.5f' % (nx.attribute_assortativity_coefficient(H, 'region'))
Example #27
def stats(G, P_set, matched, F, method=1):

    score = 0
    for (u, v) in matched:
        if u in P_set:
            p = u
            c = v
        else:
            c = u
            p = v
        if (c, p) in F:
            score += float(F[(c, p)])

    support = []
    att_dic = nx.get_node_attributes(G, 'att')

    for p in list(P_set):
        c_att = att_dic[p]
        neigh_list = G.neighbors(p)
        temp = 0
        for neigh in neigh_list:
            att = att_dic[neigh]
            if att == c_att:
                temp += 1
        support.append(temp)

    if method == 1:
        ass = nx.attribute_assortativity_coefficient(G, 'att')
        div = ass
    elif method == 2:
        iu = IU.InformationUnfairness(G)
        div = iu
    elif method == 3:
        ec = EC.EchoChamber(G)
        div = ec

    return round(score, 3), round(div, 3), round(st.mean(support), 3)
Example #28
def create_colors_per_comm(G,layer1,layer2,layer3,slayer1,slayer2,pos,fig,subp=(122)):
    for i in layer1:
        G.add_node(i,color='r',attr_dict=G.node[i],best_partition_comm='1')
    for i in layer2:
        G.add_node(i,color='g',attr_dict=G.node[i],best_partition_comm='2')
    for i in layer3:
        G.add_node(i,color='b',attr_dict=G.node[i],best_partition_comm='3')    
    fig.add_subplot(subp)
    # for i in G.nodes(data=True):
    #     print i
    # rr=nx.attribute_assortativity_coefficient(G,'color')
    rra=nx.attribute_assortativity_coefficient(G,'best_partition_comm')
    s_title='Community Partition\nAssortativity_coef(3_communities) = %.2f ' %(rra)
    plt.title(s_title)#,{'size': '20'})
    nodecolor=[i[1]['color'] for i in G.nodes(data=True)]
    nx.draw_networkx_nodes(G,pos=pos,node_color=nodecolor,alpha=0.3)
    # nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[1]),node_color='gold')
    nx.draw_networkx_labels(G,pos=pos)
    nx.draw_networkx_edges(G,pos=pos,alpha=0.2)


    plt.axis("off")
Example #29
 def assortativity(self, G, attr_assorttype):
     tab = Table()
     tdata = []
     assorttypes = {
         'attribute':
         lambda x, y: nx.attribute_assortativity_coefficient(x, y),
         'numeric': lambda x, y: nx.numeric_assortativity_coefficient(x, y)
     }
     rescale_func = {
         'attribute':
         lambda x, xsetsorted: x,
         'numeric':
         lambda x, setsorted: self.classify(x, setsorted, 100)
         if max(setsorted) > 1000 else x
     }
     for attr, assorttype in attr_assorttype.iteritems():
         start = timeit.default_timer()
         node_attr = nx.get_node_attributes(G, attr)
         setsorted = set(node_attr.values())
         if len(setsorted) > 1:
             if max(setsorted) > 1000:
                 node_attr = self.classify(node_attr, 100)
             G_assort = nx.Graph(G.subgraph(node_attr.keys()))
             nx.set_node_attributes(G_assort, attr, node_attr)
             coef = assorttypes[assorttype](G_assort, attr)
         else:
             coef = np.nan
         stop = timeit.default_timer()
         tdata.append((assorttype, attr, coef, (stop - start)))
     start = timeit.default_timer()
     coef = nx.degree_pearson_correlation_coefficient(G)
     deg = set(nx.degree(G).values())
     stop = timeit.default_timer()
     tdata.append(('degree', '', coef, (stop - start)))
     tab.from_tuples(tdata,
                     columns=['Type', 'Attribute', 'Coef.', 'Time (sec)'])
     tab.sort_values(by=['Coef.'], ascending=False)
     tab.display()
Example #30
def create_2colors_per_comm(G,
                            layer1,
                            layer2,
                            layer3,
                            slayer1,
                            slayer2,
                            pos,
                            fig,
                            subp=122):
    sl11 = random.sample(slayer1, len(slayer1) / 2)
    sl12 = list(set(slayer1) - set(sl11))
    # print G.nodes(data=True)
    for i in sl11:
        G.add_node(i, color='y', attr_dict=G.node[i], fattr='1')
    for i in sl12:
        G.add_node(i, color='grey', attr_dict=G.node[i], fattr='2')
    sl21 = random.sample(slayer2, len(slayer2) / 2)
    sl22 = list(set(slayer2) - set(sl21))
    for i in sl21:
        G.add_node(i, color='m', attr_dict=G.node[i], fattr='3')
    for i in sl22:
        G.add_node(i, color='c', attr_dict=G.node[i], fattr='4')
    fig.add_subplot(subp)
    # for i in G.nodes():
    #     if i in la
    # for i in G.nodes(data=True):
    #     print i
    rr = nx.attribute_assortativity_coefficient(G, 'fattr')
    s_title = 'Discrete vertex attributes\nAssortativity_coef(4_attributes) = %.2f' % rr
    plt.title(s_title)  #,{'size': '20'})
    nodecolor = [i[1]['color'] for i in G.nodes(data=True)]
    nx.draw_networkx_nodes(G, pos=pos, node_color=nodecolor, alpha=0.3)
    # nx.draw_networkx_nodes(G,pos=pos,nodelist=list(sets[1]),node_color='gold')
    nx.draw_networkx_labels(G, pos=pos)
    nx.draw_networkx_edges(G, pos=pos, alpha=0.2)

    plt.axis("off")
Example #31
def convert():
    G = nx.read_gpickle('2.gpickle')
    print(nx.attribute_assortativity_coefficient(G, 'att1'))
    print(nx.attribute_assortativity_coefficient(G, 'att2'))
    print(nx.attribute_assortativity_coefficient(G, 'att3'))

    edges = G.edges()

    G1 = nx.Graph()
    G1.add_edges_from(edges)
    att = nx.get_node_attributes(G, 'att1')
    nx.set_node_attributes(G1, att, 'att')
    print(nx.attribute_assortativity_coefficient(G1, 'att'))

    G2 = nx.Graph()
    G2.add_edges_from(edges)
    att = nx.get_node_attributes(G, 'att2')
    nx.set_node_attributes(G2, att, 'att')
    print(nx.attribute_assortativity_coefficient(G2, 'att'))

    G3 = nx.Graph()
    G3.add_edges_from(edges)
    att = nx.get_node_attributes(G, 'att3')
    nx.set_node_attributes(G3, att, 'att')
    print(nx.attribute_assortativity_coefficient(G3, 'att'))

    G4 = nx.Graph()
    G4.add_edges_from(edges)
    att = nx.get_node_attributes(G, 'att4')
    nx.set_node_attributes(G4, att, 'att')
    print(nx.attribute_assortativity_coefficient(G4, 'att'))

    nx.write_gpickle(G1, 'intra2_1.gpickle')
    nx.write_gpickle(G2, 'intra2_2.gpickle')
    nx.write_gpickle(G3, 'intra2_3.gpickle')
    nx.write_gpickle(G4, 'intra2_4.gpickle')
Example #32
            if m != n:
                if G.has_edge(m,n):
                    G[m][n]['weight'] += 1
                else:
                    G.add_edge(m,n, weight = 1)
 
#### measuring node strength ##############
weight_dict = defaultdict(int)
for e in G.edges():
    n1,n2 = e
    e_weight = G[n1][n2]['weight']
    weight_dict[n1] += e_weight
    weight_dict[n2] += e_weight

############################################                   
print nx.attribute_assortativity_coefficient(G,'gender')

node_clustering = nx.clustering(G)
node_degree = nx.degree(G)

btw_list =  nx.betweenness_centrality(G)
closeness_list = nx.closeness_centrality(G)
core_list = nx.core_number(G)  # not a good measure
esize = effective_size(G)
rc = nx.rich_club_coefficient(G,normalized=False)
rc[23] = 1.0
print rc
print core_list

male_cc = []
Example #33
def plot_graph_bip_2set_3comms(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,fig,d1=1.5,d2=5.,d3=0,d4=.8,nodesize=1000,withlabels=True,edgelist=[],layout=True,alpha=0.5):
    
    if layout:
        pos=nx.spring_layout(G)
    else:
        pos=nx.random_layout(G)

    top_set=set()
    bottom_set=set()
    middle_set=set()
    down=[]
    right=[]
    left=[]

    mlayer_part={}
    for i in broken_partition:
        ii=i.split('_')
        if ii[1] not in mlayer_part:
            mlayer_part[ii[1]]=set([ii[2]])
        else:
            mlayer_part[ii[1]].add(ii[2])

    layers_m=Counter()
    for k,v in mlayer_part.items():
        if len(v)==1:
            layers_m[1]+=1
        elif len(v)==2:
            layers_m[2]+=1
        elif len(v)==3:
            layers_m[3]+=1
        else:
            print k,v

    broken_pos={}
    singles=0

    for i,v in broken_partition.items():   
        name=i.split('_')
        if name[-1]=='s':
            singles+=1
        ndnd=random.choice(v)
        npos=pos[ndnd]
        if ndnd in layer1:
            broken_pos[i]=[d2*(npos[0]-d1),d2*(npos[1]+d1)] 
            top_set.add(i)
            left.append(broken_pos[i])
        elif ndnd in layer2:
            broken_pos[i]=[d2*(npos[0]+d1),d2*(npos[1]+d1)] 
            bottom_set.add(i)
            right.append(broken_pos[i])
        # else:
        #     broken_pos[i]=[d2*npos[0],d2*(npos[1]-d1)] 
        #     middle_set.add(i)
        #     down.append(broken_pos[i])
    # print top_set
    # print bottom_set 
    xleft=[i[0] for i in left]
    yleft=[i[1] for i in left]

    aleft = [min(xleft)-d1/2.,max(yleft)+d1/2.+d3]
    bleft = [max(xleft)+d1/2.,max(yleft)+d1/2.+3*d3]
    cleft = [max(xleft)+d1/2.,min(yleft)-d1/2.-3*d3]
    dleft = [min(xleft)-d1/2.,min(yleft)-d1/2.-d3]

    xright=[i[0] for i in right]
    yright=[i[1] for i in right]

    aright = [min(xright)-d1/2.,max(yright)+d1/2.+d3]
    bright = [max(xright)+d1/2.,max(yright)+d1/2.+3*d3]
    cright = [max(xright)+d1/2.,min(yright)-d1/2.-3*d3]
    dright = [min(xright)-d1/2.,min(yright)-d1/2.-d3]

    # xdown=[i[0] for i in down]
    # ydown=[i[1] for i in down]

    # adown = [min(xdown)-d1/2.,max(ydown)+d1/2.+d3]
    # bdown = [max(xdown)+d1/2.,max(ydown)+d1/2.+3*d3]
    # cdown = [max(xdown)+d1/2.,min(ydown)-d1/2.-3*d3]
    # ddown = [min(xdown)-d1/2.,min(ydown)-d1/2.-d3]

    # fig=plt.figure(figsize=(20,20))
        # plt.subplot(1,2,1)

    ax=fig.add_subplot(122)

    ax.add_patch(Polygon([aleft,bleft,cleft,dleft],color='grey',alpha=0.1)) 
    plt.plot([aleft[0],bleft[0],cleft[0],dleft[0],aleft[0]],[aleft[1],bleft[1],cleft[1],dleft[1],aleft[1]],'-',color='grey')

    ax.add_patch(Polygon([aright,bright,cright,dright],color='gold',alpha=0.1)) 
    plt.plot([aright[0],bright[0],cright[0],dright[0],aright[0]],[aright[1],bright[1],cright[1],dright[1],aright[1]],'-',color='gold')

    # ax.add_patch(Polygon([adown,bdown,cdown,ddown],color='g',alpha=0.1)) 
    # plt.plot([adown[0],bdown[0],cdown[0],ddown[0],adown[0]],[adown[1],bdown[1],cdown[1],ddown[1],adown[1]],'-g')

    nodeSize=[nodesize*len(broken_partition[i]) for i in list(top_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(top_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(top_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(middle_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(middle_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(middle_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(bottom_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(bottom_set) ]

    nx.draw_networkx_nodes(broken_graph,broken_pos,nodelist=list(bottom_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    
    if withlabels:
        nx.draw_networkx_labels(G,pos)
    
    lay1_edges=[ed for ed in G.edges() if ed[0] in layer1 and ed[1] in layer1]
    lay2_edges=[ed for ed in G.edges() if ed[0] in layer2 and ed[1] in layer2]
    lay3_edges=[ed for ed in G.edges() if ed[0] in layer3 and ed[1] in layer3]
    # print G.nodes()
    # print npartition
    for i,v in enumerate(npartition):
        for nd in v:
            G.add_node(nd,part=i)


 #    print 'bbbbbb'
    # print G.nodes(data=True)
 #      print 'aaaaa'
    rrd=nx.attribute_assortativity_coefficient(G,'part')
    nx.draw_networkx_edges(broken_graph,broken_pos,alpha=0.3) #0.15
    orr=nx.attribute_assortativity_coefficient(broken_graph,'color')
    for i,v in broken_partition.items():
        for nd in v:
            atrr=G.node[nd]
            G.add_node(nd,attr_dict=atrr,asso=i)
    # print G.nodes(data=True)
    rr=nx.attribute_assortativity_coefficient(G,'asso')
    # print 'Community partition attribute assortativity coefficient wrt bipartition = %f' %orr
    # title_s='Bipartite graph with bipartition as 2-layers (%i 2-layered, %i 1-layered)\n Discrete assortativity coefficient of the joint partition of communities and bipartition = %f\n(Community partition attribute assortativity coefficient = %f)' %(layers_m[2],layers_m[1],rr,rrd)  
    title_s='Bipartite graph with bipartition as 2-layers (%i 2-layered, %i 1-layered)\n Joint_Assortativity_coef(bipartition,3_communities) = %.2f' %(layers_m[2],layers_m[1],rr)  

    # title_s='%i Three vertex attributes (%i 3-layered, %i 2-layered, %i 1-layered)' %(len(npartition),layers_m[3],layers_m[2],layers_m[1])
    plt.title(title_s,{'size': '12'})
    plt.axis('off')
    plt.show()
Example #34
tmp = hp.get_partition()
partitions = tmp[0]
groups = tmp[1]

# Read in the networks
project = "584"
FF_all = nx.read_edgelist('data/networks/%s_FF.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph()) 
AT_all = nx.read_edgelist('data/networks/%s_AT.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph()) 
RT_all = nx.read_edgelist('data/networks/%s_RT.edgelist' % project, nodetype=str, data=(('weight',float),),create_using=nx.DiGraph())

# Add dummy nodes if they are missing in the networks
i = 0
for partition in partitions:
    for node in partition:
        FF_all.add_node(node, group =  groups[i])
        AT_all.add_node(node, group =  groups[i])
        RT_all.add_node(node, group =  groups[i])
    i += 1


# Compute Assortativity in Friendships
aFF = nx.attribute_assortativity_coefficient(FF_all,'group')
aAT = nx.attribute_assortativity_coefficient(AT_all,'group')
aRT = nx.attribute_assortativity_coefficient(RT_all,'group')

# Output
csv_writer.writerow([aFF,aAT,aRT])


## TODO Compute the average between ties that are inside the group and ties that are between groups
Example #35
 def test_attribute_assortativity_directed(self):
     r = nx.attribute_assortativity_coefficient(self.D, "fish")
     assert r == 1.0 / 3.0
Example #36
all_nodes['name'] = all_nodes['name'].str.strip()

# Ensure that all "Bearers" do not become a single node
all_nodes['name'].replace(
    to_replace=[r'MRS?\.\s+', r'\.', r'\s+', 'LIMITED', 'THE BEARER'], 
    value=['', '', ' ', 'LTD', np.nan], 
    inplace=True, regex=True)

# The network is ready to use
# As an exercise, let's have a look at Mr. Roldugin's or Mr. Poroshenko's assets
# seeds = [12079386, 12096275, 12180773] # Roldugin
seeds = [12129717, 13001828] # Poroshenko
nodes_of_interest = set.union(*[set(nx.single_source_shortest_path_length(F, x, cutoff=4).keys()) for x in seeds])

# Extract the subgraph and relabel it
ego = nx.subgraph(F, nodes_of_interest)

nodes = all_nodes.ix[ego.nodes()]
nx.set_node_attributes(ego, "cc", nodes.country_codes)
ego = nx.relabel_nodes(ego, nodes[nodes.name.notnull()].name)
ego = nx.relabel_nodes(ego, nodes[nodes.address.notnull() 
                                    & nodes.name.isnull()].address)

# Must be negative: that's what OFFSHORES are about!
print("Country code assortativity:",
      nx.attribute_assortativity_coefficient(ego,'cc'))

# Save and proceed to Gephi
with open('ego.graphml', 'wb') as ofile: 
    nx.write_graphml(ego, ofile)
Example #37
 def test_attribute_assortativity_directed(self):
     r=nx.attribute_assortativity_coefficient(self.D,'fish')
     assert_equal(r,1.0/3.0)
Example #38
 def test_attribute_assortativity_undirected(self):
     r=nx.attribute_assortativity_coefficient(self.G,'fish')
     assert_equal(r,6.0/22.0)
Example #39
File: rewire.py Project: sfi72hs/sixteen
def get_inf_assortativity(mx, is_infected):
    G=nx.from_numpy_matrix(mx)
    nx.set_node_attributes(G, "infected", dict(enumerate(is_infected)))
    return nx.attribute_assortativity_coefficient(G, 'infected')
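Note that this example uses the pre-2.0 argument order of nx.set_node_attributes (graph, name, values), and nx.from_numpy_matrix was removed in NetworkX 3.0. A sketch of the same helper against the current API:

import networkx as nx
import numpy as np

def get_inf_assortativity_v2(mx, is_infected):
    G = nx.from_numpy_array(np.asarray(mx))
    nx.set_node_attributes(G, dict(enumerate(is_infected)), 'infected')
    return nx.attribute_assortativity_coefficient(G, 'infected')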
Example #40
 def test_attribute_assortativity_multigraph(self):
     r=nx.attribute_assortativity_coefficient(self.M,'fish')
     assert_equal(r,1.0)
Example #41
def plot_graph_bip_3comms_2set(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,d1=1.5,d2=5.,d3=0,d4=.8,nodesize=1000,withlabels=True,edgelist=[],layout=True,alpha=0.5):
    
    if layout:
        pos=nx.spring_layout(G)
    else:
        pos=nx.random_layout(G)

    top_set=set()
    bottom_set=set()
    middle_set=set()
    down=[]
    right=[]
    left=[]

    mlayer_part={}
    for i in broken_partition:
        ii=i.split('_')
        if ii[1] not in mlayer_part:
            mlayer_part[ii[1]]=set([ii[2]])
        else:
            mlayer_part[ii[1]].add(ii[2])

    layers_m=Counter()
    for k,v in mlayer_part.items():
        if len(v)==1:
            layers_m[1]+=1
        elif len(v)==2:
            layers_m[2]+=1
        elif len(v)==3:
            layers_m[3]+=1
        else:
            print k,v

    broken_pos={}
    singles=0

    for i,v in broken_partition.items():   
        name=i.split('_')
        if name[-1]=='s':
            singles+=1
        ndnd=random.choice(v)
        npos=pos[ndnd]
        if ndnd in layer1:
            broken_pos[i]=[d2*(npos[0]-d1),d2*(npos[1]+d1)] 
            top_set.add(i)
            left.append(broken_pos[i])
        elif ndnd in layer2:
            broken_pos[i]=[d2*(npos[0]+d1),d2*(npos[1]+d1)] 
            bottom_set.add(i)
            right.append(broken_pos[i])
        else:
            broken_pos[i]=[d2*npos[0],d2*(npos[1]-d1)] 
            middle_set.add(i)
            down.append(broken_pos[i])
        
    xleft=[i[0] for i in left]
    yleft=[i[1] for i in left]

    aleft = [min(xleft)-d1/2.,max(yleft)+d1/2.+d3]
    bleft = [max(xleft)+d1/2.,max(yleft)+d1/2.+3*d3]
    cleft = [max(xleft)+d1/2.,min(yleft)-d1/2.-3*d3]
    dleft = [min(xleft)-d1/2.,min(yleft)-d1/2.-d3]

    xright=[i[0] for i in right]
    yright=[i[1] for i in right]

    aright = [min(xright)-d1/2.,max(yright)+d1/2.+d3]
    bright = [max(xright)+d1/2.,max(yright)+d1/2.+3*d3]
    cright = [max(xright)+d1/2.,min(yright)-d1/2.-3*d3]
    dright = [min(xright)-d1/2.,min(yright)-d1/2.-d3]

    xdown=[i[0] for i in down]
    ydown=[i[1] for i in down]

    adown = [min(xdown)-d1/2.,max(ydown)+d1/2.+d3]
    bdown = [max(xdown)+d1/2.,max(ydown)+d1/2.+3*d3]
    cdown = [max(xdown)+d1/2.,min(ydown)-d1/2.-3*d3]
    ddown = [min(xdown)-d1/2.,min(ydown)-d1/2.-d3]

    fig=plt.figure(figsize=(20,20))
    ax=fig.add_subplot(111)

    ax.add_patch(Polygon([aleft,bleft,cleft,dleft],color='r',alpha=0.1)) 
    plt.plot([aleft[0],bleft[0],cleft[0],dleft[0],aleft[0]],[aleft[1],bleft[1],cleft[1],dleft[1],aleft[1]],'-r')

    ax.add_patch(Polygon([aright,bright,cright,dright],color='b',alpha=0.1)) 
    plt.plot([aright[0],bright[0],cright[0],dright[0],aright[0]],[aright[1],bright[1],cright[1],dright[1],aright[1]],'-b')

    ax.add_patch(Polygon([adown,bdown,cdown,ddown],color='g',alpha=0.1)) 
    plt.plot([adown[0],bdown[0],cdown[0],ddown[0],adown[0]],[adown[1],bdown[1],cdown[1],ddown[1],adown[1]],'-g')

    nodeSize=[nodesize*len(broken_partition[i]) for i in list(top_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(top_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(top_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(middle_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(middle_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(middle_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(bottom_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(bottom_set) ]

    nx.draw_networkx_nodes(broken_graph,broken_pos,nodelist=list(bottom_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    
    if withlabels:
        nx.draw_networkx_labels(G,pos)
    
    lay1_edges=[ed for ed in G.edges() if ed[0] in layer1 and ed[1] in layer1]
    lay2_edges=[ed for ed in G.edges() if ed[0] in layer2 and ed[1] in layer2]
    lay3_edges=[ed for ed in G.edges() if ed[0] in layer3 and ed[1] in layer3]
    
    nx.draw_networkx_edges(broken_graph,broken_pos,alpha=0.3) #0.15
    rr=nx.attribute_assortativity_coefficient(broken_graph,'color')
    title_s='%i Three vertex attributes (%i 3-layered, %i 2-layered, %i 1-layered)\n Attribute assortativity coefficient wrt layer partition = %f' %(len(npartition),layers_m[3],layers_m[2],layers_m[1],rr)  

    # title_s='%i Three vertex attributes (%i 3-layered, %i 2-layered, %i 1-layered)' %(len(npartition),layers_m[3],layers_m[2],layers_m[1])
    plt.title(title_s,{'size': '20'})
    plt.axis('off')
    plt.show()
Example #42
def metrics(G, state="before"):  # Let's break this into node-level vs. network-level metrics
    nodeMets = [] #Node level Metrics
    nodeMetNames = []
    nodes = nx.nodes(G)
    if state == "after":
        nodes.insert(16,16)
    nodeMets.append(nodes)
    nodeMetNames.append('Node')
    if state == "after":
        intIn = np.zeros((G.order()+1,1))
        intOut = np.zeros((G.order()+1,1))
    else:
        intIn = np.zeros((G.order(),1))
        intOut = np.zeros((G.order(),1))
    #print len(nx.edges(G))
    for edge in nx.edges(G):
        intIn[edge[1]] += G[edge[0]][edge[1]]['weight']
        intOut[edge[0]] += G[edge[0]][edge[1]]['weight']
    intIn = intIn.flatten().tolist()
    intOut = intOut.flatten().tolist()
    #print sum(intIn), sum(intOut)
    nodeMets.append(intIn)
    nodeMetNames.append('InteractionIn')
    nodeMets.append(intOut)
    nodeMetNames.append('InteractionOut')
    degreeIn = []
    degreeOut = []
    for node in G.in_degree_iter():
        degreeIn.append(node[1])
    for node in G.out_degree_iter():
        degreeOut.append(node[1])
    if state == "after":
        degreeIn.insert(16,999)
    nodeMets.append(degreeIn)
    nodeMetNames.append('DegreeIn')
    if state == "after":
        degreeOut.insert(16,999)
    nodeMets.append(degreeOut)
    nodeMetNames.append('DegreeOut')
    nodeTrans = nx.clustering(G.to_undirected()).values()
    if state == "after":
        nodeTrans.insert(16,999)
    nodeMets.append(nodeTrans)
    nodeMetNames.append('Transitivity')
    pageRank = nx.pagerank(G, weight = 'weight').values()
    if state == "after":
        pageRank.insert(16,999)
    nodeMets.append(pageRank)
    nodeMetNames.append('PageRank')
    hitsHubs, hitsAuths = nx.hits(G)[0].values(), nx.hits(G)[1].values()
    if state == "after":
        hitsHubs.insert(16,999)
    nodeMets.append(hitsHubs)
    nodeMetNames.append('Hubs')
    if state == "after":
        hitsAuths.insert(16,999)
    nodeMets.append(hitsAuths)
    nodeMetNames.append('Authorities')
    nodesOutput = zip(nodeMetNames, nodeMets)

    #Network level metrics
    netMets = []
    netMetNames = []
    for each in G.nodes_iter():
        G.node[each]['totalInt'] = intIn[each] + intOut[each]
    assort = nx.attribute_assortativity_coefficient(G, 'totalInt')
    netMets.append(assort)
    netMetNames.append('Assortativity (total Int)')

    diameter = nx.diameter(G.to_undirected())
    netMets.append(diameter)
    netMetNames.append('Diameter')
    connect = float(sum(nx.degree(G).values()))/float(len(nodes))
    #print "Nodes: " + str(len(nodes))
    netMets.append(connect)
    netMetNames.append("Average Node Degree")
    transit = nx.transitivity(G)
    netMets.append(transit)
    netMetNames.append('Transitivity')
    density = nx.density(G)
    netMets.append(density)
    netMetNames.append('Density')
    beta = float(len(nx.edges(G)))/float(len(nx.nodes(G)))
    netMets.append(beta)
    netMetNames.append('BetaIndex')
    gamma = float(len(nx.edges(G)))/float(len(nx.nodes(G))**2)
    netMets.append(gamma)
    netMetNames.append("GammaIndex")
    netOutput = zip(netMetNames, netMets)
    #print nodeMetNames
    return nodeMets, netMets
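A side note on the assortativity call above: 'totalInt' is numeric, and attribute_assortativity_coefficient treats every distinct value as its own category; NetworkX also offers nx.numeric_assortativity_coefficient for ordered numeric attributes. A toy comparison (illustrative):

import networkx as nx

G = nx.path_graph(4)
nx.set_node_attributes(G, {0: 1, 1: 2, 2: 2, 3: 3}, 'size')
print(nx.attribute_assortativity_coefficient(G, 'size'))  # categorical view
print(nx.numeric_assortativity_coefficient(G, 'size'))    # Pearson-style view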
Example #43
def plot_graph(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,d1=1.5,d2=5.,d3=0,d4=.8,nodesize=1000,withlabels=True,edgelist=[],layout=True,alpha=0.5):
    
    if layout:
        pos=nx.spring_layout(G)
    else:
        pos=nx.random_layout(G)

    top_set=set()
    bottom_set=set()
    middle_set=set()
    down=[]
    right=[]
    left=[]

    mlayer_part={}
    for i in broken_partition:
        ii=i.split('_')
        if ii[1] not in mlayer_part:
            mlayer_part[ii[1]]=set([ii[2]])
        else:
            mlayer_part[ii[1]].add(ii[2])

    layers_m=Counter()
    for k,v in mlayer_part.items():
        if len(v)==1:
            layers_m[1]+=1
        elif len(v)==2:
            layers_m[2]+=1
        elif len(v)==3:
            layers_m[3]+=1
        else:
            print k,v

    broken_pos={}
    singles=0
    for i,v in broken_partition.items():       
        name=i.split('_')
        if name[-1]=='s':
            singles+=1
        ndnd=random.choice(v)
        npos=pos[ndnd]
        if ndnd in layer1:
            broken_pos[i]=[d2*(npos[0]-d1),d2*(npos[1]+d1)] 
            top_set.add(i)
            left.append(broken_pos[i])
        elif ndnd in layer2:
            broken_pos[i]=[d2*(npos[0]+d1),d2*(npos[1]+d1)] 
            bottom_set.add(i)
            right.append(broken_pos[i])
        else:
            broken_pos[i]=[d2*npos[0],d2*(npos[1]-d1)] 
            middle_set.add(i)
            down.append(broken_pos[i])
       
    xleft=[i[0] for i in left]
    yleft=[i[1] for i in left]

    aleft = [min(xleft)-d1/2.,max(yleft)+d1/2.+d3]
    bleft = [max(xleft)+d1/2.,max(yleft)+d1/2.+3*d3]
    cleft = [max(xleft)+d1/2.,min(yleft)-d1/2.-3*d3]
    dleft = [min(xleft)-d1/2.,min(yleft)-d1/2.-d3]

    xright=[i[0] for i in right]
    yright=[i[1] for i in right]

    aright = [min(xright)-d1/2.,max(yright)+d1/2.+d3]
    bright = [max(xright)+d1/2.,max(yright)+d1/2.+3*d3]
    cright = [max(xright)+d1/2.,min(yright)-d1/2.-3*d3]
    dright = [min(xright)-d1/2.,min(yright)-d1/2.-d3]

    xdown=[i[0] for i in down]
    ydown=[i[1] for i in down]

    adown = [min(xdown)-d1/2.,max(ydown)+d1/2.+d3]
    bdown = [max(xdown)+d1/2.,max(ydown)+d1/2.+3*d3]
    cdown = [max(xdown)+d1/2.,min(ydown)-d1/2.-3*d3]
    ddown = [min(xdown)-d1/2.,min(ydown)-d1/2.-d3]

    fig=plt.figure(figsize=(20,20))
    ax=fig.add_subplot(111)

    ax.add_patch(Polygon([aleft,bleft,cleft,dleft],color='r',alpha=0.1)) 
    plt.plot([aleft[0],bleft[0],cleft[0],dleft[0],aleft[0]],[aleft[1],bleft[1],cleft[1],dleft[1],aleft[1]],'-r')

    ax.add_patch(Polygon([aright,bright,cright,dright],color='b',alpha=0.1)) 
    plt.plot([aright[0],bright[0],cright[0],dright[0],aright[0]],[aright[1],bright[1],cright[1],dright[1],aright[1]],'-b')

    ax.add_patch(Polygon([adown,bdown,cdown,ddown],color='g',alpha=0.1)) 
    plt.plot([adown[0],bdown[0],cdown[0],ddown[0],adown[0]],[adown[1],bdown[1],cdown[1],ddown[1],adown[1]],'-g')

    nodeSize=[nodesize*len(broken_partition[i]) for i in list(top_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(top_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(top_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(middle_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(middle_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(middle_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(bottom_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(bottom_set) ]

    nx.draw_networkx_nodes(broken_graph,broken_pos,nodelist=list(bottom_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    
    if withlabels:
        nx.draw_networkx_labels(G,pos)
    
    lay1_edges=[ed for ed in G.edges() if ed[0] in layer1 and ed[1] in layer1]
    lay2_edges=[ed for ed in G.edges() if ed[0] in layer2 and ed[1] in layer2]
    lay3_edges=[ed for ed in G.edges() if ed[0] in layer3 and ed[1] in layer3]
    

    nx.draw_networkx_edges(broken_graph,broken_pos,alpha=0.3) #0.15
    # orr=nx.attribute_assortativity_coefficient(broken_graph,'color')
    for i,v in broken_partition.items():
        for nd in v:
            atrr=G.node[nd]
            G.add_node(nd,attr_dict=atrr,asso=i)
    rr=nx.attribute_assortativity_coefficient(G,'asso')
    # print 'Attribute assortativity coefficient wrt layer partition (old) = %f' %orr
    title_s='%i connected components (%i 3-layered, %i 2-layered, %i 1-layered)\n  Discrete assortativity coefficient of the joint partition for connected_components and 3 layers = %f ' %(len(npartition),layers_m[3],layers_m[2],layers_m[1],rr)
    plt.title(title_s,{'size': '20'})
    
    plt.axis('off')
    
    plt.show()




# n = 50
# p = 0.05
# r1 = 0.333
# r2 = 0.333
# r3 = 0.333
# G, layer1, layer2, layer3, edgeList = analyticThreeLayerGraph(n,p,r1,r2,r3,G_isolates=False)
# # # print G.nodes()
# # # print layer1
# # # print layer2
# # # print layer3
# broken_graph,broken_partition,npartition=create_node_conncomp_graph(G,layer1,layer2,layer3)
# # # print broken_partition
# print broken_graph.nodes(data=True)
# plot_graph(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,withlabels=False,nodesize=10,layout=False)
Example #44
 def test_attribute_assortativity_undirected(self):
     r = nx.attribute_assortativity_coefficient(self.G, "fish")
     assert r == 6.0 / 22.0
Example #45
	def get_topology_dict(self, series):
		# Consume the dated graph series, recording per-snapshot topology metrics.
		# 'average' is assumed to be numpy.average (the import is not part of this
		# snippet); reciprocity() and reinforce() are project-local helpers (see
		# the sketch after this method).
		while len(series):
			g = series.pop()
			d = datetime.datetime.strptime(str(g), "%Y-%m-%d")
			self.table[d] = {'size': g.size(), 'order': g.order()}
			g_order = self.table[d]['order']
			self.table[d]['degree'] = average(g.degree().values())
			# Subgraph induced by the nodes that carry a 'family' attribute.
			sg = g.subgraph(nx.get_node_attributes(g, 'family').keys())

			if len(sg):
				self.table[d]['f_size'] = sg.size()
				self.table[d]['f_order'] = sg.order()
				self.table[d]['f_degree'] = sum(sg.degree().values()) / float(sg.order())
				sg_order = self.table[d]['f_order']
			else:
				sg_order = 0

			gDict = nx.get_node_attributes(g, 'gender')
			if gDict:
				self.table[d]['gender_order'] = len([x for x in g.nodes_iter() if gDict.get(x) == 0])
				self.table[d]['f_gender_order'] = len([x for x in sg.nodes_iter() if gDict.get(x) == 0])

			if self.is_directed:
				if sg_order:
					self.table[d]['f_rep'] = reciprocity(sg)
					self.table[d]['f_rei'] = reinforce(sg)
					self.table[d]['f_asr'] = nx.degree_assortativity_coefficient(sg, weight="weight")
					self.table[d]['f_asr_gender'] = nx.attribute_assortativity_coefficient(sg, "gender")
					self.table[d]['f_asr_race'] = nx.attribute_assortativity_coefficient(sg, "race")

				if g_order:
					self.table[d]['rep'] = reciprocity(g)
					self.table[d]['rei'] = reinforce(g)
					self.table[d]['asr'] = nx.degree_assortativity_coefficient(g, weight="weight")
					if nx.get_node_attributes(g, 'gender'):
						self.table[d]['asr_gender'] = nx.attribute_assortativity_coefficient(g, "gender")
					if nx.get_node_attributes(g, 'race'):
						self.table[d]['asr_race'] = nx.attribute_assortativity_coefficient(g, "race")
					# Largest strongly / weakly connected components (networkx 1.x API).
					scc = nx.strongly_connected_component_subgraphs(g)[0]
					self.table[d]['scc_order'] = scc.order()
					self.table[d]['scc_size'] = scc.size()
					del scc
					wcc = nx.weakly_connected_component_subgraphs(g)[0]
					self.table[d]['wcc_order'] = wcc.order()
					self.table[d]['wcc_size'] = wcc.size()
					del wcc
				else:
					self.table[d]['rep'] = 0
					self.table[d]['scc_order'] = 0
					self.table[d]['scc_size'] = 0
					self.table[d]['wcc_order'] = 0
					self.table[d]['wcc_size'] = 0
			else:
				if sg_order:
					self.table[d]['fcc'] = nx.average_clustering(sg)

				if g_order:
					self.table[d]['cc'] = nx.average_clustering(g)
					gcc = nx.connected_component_subgraphs(g)[0]
					self.table[d]['gcc_order'] = gcc.order()
					self.table[d]['gcc_size'] = gcc.size()
					#self.table[d]['gcc_apl'] = nx.average_shortest_path_length(gcc)
					del gcc
				else:
					self.table[d]['cc'] = 0
					self.table[d]['gcc_order'] = 0
					self.table[d]['gcc_size'] = 0
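# reciprocity() and reinforce() used above are not networkx functions; they
# appear to be project-local helpers. A minimal sketch of one plausible
# reciprocity measure, assuming it is the fraction of directed edges whose
# reverse edge also exists:
def reciprocity_sketch(g):
    """Fraction of edges (u, v) for which the reverse edge (v, u) exists."""
    if g.size() == 0:
        return 0.0
    mutual = sum(1 for u, v in g.edges() if g.has_edge(v, u))
    return mutual / float(g.size())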
Example #46
0
def test_attribute_assortativity_multigraph(self):
    r = nx.attribute_assortativity_coefficient(self.M, "fish")
    assert r == 1.0
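# As in Example #44, the fixture self.M is not shown. Any multigraph whose
# edges (parallel ones included) all join nodes sharing the same attribute
# value is perfectly assortative. An assumed fixture, not the test's own:
import networkx as nx

M_fish = nx.MultiGraph()
M_fish.add_nodes_from([0, 1], fish="one")
M_fish.add_nodes_from([2, 3], fish="two")
M_fish.add_edges_from([(0, 1), (0, 1), (2, 3)])
assert abs(nx.attribute_assortativity_coefficient(M_fish, "fish") - 1.0) < 1e-12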
def plot_graph_bip_3comms_2set(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,d1=1.5,d2=5.,d3=0,d4=.8,nodesize=1000,withlabels=True,edgelist=[],layout=True,alpha=0.5):
    
    if layout:
        pos=nx.spring_layout(G)
    else:
        pos=nx.random_layout(G)

    top_set=set()
    bottom_set=set()
    middle_set=set()
    down=[]
    right=[]
    left=[]

    mlayer_part={}
    for i in broken_partition:
        ii=i.split('_')
        if ii[1] not in mlayer_part:
            mlayer_part[ii[1]]=set([ii[2]])
        else:
            mlayer_part[ii[1]].add(ii[2])

    layers_m=Counter()
    for k,v in mlayer_part.items():
        if len(v)==1:
            layers_m[1]+=1
        elif len(v)==2:
            layers_m[2]+=1
        elif len(v)==3:
            layers_m[3]+=1
        else:
            print(k, v)

    broken_pos={}
    singles=0

    for i,v in broken_partition.items():   
        name=i.split('_')
        if name[-1]=='s':
            singles+=1
        ndnd=random.choice(v)
        npos=pos[ndnd]
        if ndnd in layer1:
            broken_pos[i]=[d2*(npos[0]-d1),d2*(npos[1]+d1)] 
            top_set.add(i)
            left.append(broken_pos[i])
        elif ndnd in layer2:
            broken_pos[i]=[d2*(npos[0]+d1),d2*(npos[1]+d1)] 
            bottom_set.add(i)
            right.append(broken_pos[i])
        else:
            broken_pos[i]=[d2*npos[0],d2*(npos[1]-d1)] 
            middle_set.add(i)
            down.append(broken_pos[i])
        
    xleft=[i[0] for i in left]
    yleft=[i[1] for i in left]

    aleft = [min(xleft)-d1/2.,max(yleft)+d1/2.+d3]
    bleft = [max(xleft)+d1/2.,max(yleft)+d1/2.+3*d3]
    cleft = [max(xleft)+d1/2.,min(yleft)-d1/2.-3*d3]
    dleft = [min(xleft)-d1/2.,min(yleft)-d1/2.-d3]

    xright=[i[0] for i in right]
    yright=[i[1] for i in right]

    aright = [min(xright)-d1/2.,max(yright)+d1/2.+d3]
    bright = [max(xright)+d1/2.,max(yright)+d1/2.+3*d3]
    cright = [max(xright)+d1/2.,min(yright)-d1/2.-3*d3]
    dright = [min(xright)-d1/2.,min(yright)-d1/2.-d3]

    xdown=[i[0] for i in down]
    ydown=[i[1] for i in down]

    adown = [min(xdown)-d1/2.,max(ydown)+d1/2.+d3]
    bdown = [max(xdown)+d1/2.,max(ydown)+d1/2.+3*d3]
    cdown = [max(xdown)+d1/2.,min(ydown)-d1/2.-3*d3]
    ddown = [min(xdown)-d1/2.,min(ydown)-d1/2.-d3]

    fig=plt.figure(figsize=(20,20))
    ax=fig.add_subplot(111)

    ax.add_patch(Polygon([aleft,bleft,cleft,dleft],color='r',alpha=0.1)) 
    plt.plot([aleft[0],bleft[0],cleft[0],dleft[0],aleft[0]],[aleft[1],bleft[1],cleft[1],dleft[1],aleft[1]],'-r')

    ax.add_patch(Polygon([aright,bright,cright,dright],color='b',alpha=0.1)) 
    plt.plot([aright[0],bright[0],cright[0],dright[0],aright[0]],[aright[1],bright[1],cright[1],dright[1],aright[1]],'-b')

    ax.add_patch(Polygon([adown,bdown,cdown,ddown],color='g',alpha=0.1)) 
    plt.plot([adown[0],bdown[0],cdown[0],ddown[0],adown[0]],[adown[1],bdown[1],cdown[1],ddown[1],adown[1]],'-g')

    nodeSize=[nodesize*len(broken_partition[i]) for i in list(top_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(top_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(top_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(middle_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(middle_set) ]
    
    nx.draw_networkx_nodes(broken_graph,broken_pos, nodelist=list(middle_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    nodeSize=[nodesize*len(broken_partition[i]) for i in list(bottom_set)]
    nodeColor=[broken_graph.node[i]['color'] for i in list(bottom_set) ]

    nx.draw_networkx_nodes(broken_graph,broken_pos,nodelist=list(bottom_set),node_shape='s',node_color=nodeColor,alpha=1,node_size=nodeSize)
    
    if withlabels:
        nx.draw_networkx_labels(G,pos)
    
    lay1_edges=[ed for ed in G.edges() if ed[0] in layer1 and ed[1] in layer1]
    lay2_edges=[ed for ed in G.edges() if ed[0] in layer2 and ed[1] in layer2]
    lay3_edges=[ed for ed in G.edges() if ed[0] in layer3 and ed[1] in layer3]
    
    nx.draw_networkx_edges(broken_graph,broken_pos,alpha=0.3) #0.15
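    # 'color' encodes the layer partition here (cf. the plot title), so this
    # coefficient measures how strongly edges stay within a single layer.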
    rr=nx.attribute_assortativity_coefficient(broken_graph,'color')
    title_s = '%i components with three vertex attributes (%i 3-layered, %i 2-layered, %i 1-layered)\n Attribute assortativity coefficient w.r.t. the layer partition = %f' % (len(npartition), layers_m[3], layers_m[2], layers_m[1], rr)

    # title_s='%i Three vertex attributes (%i 3-layered, %i 2-layered, %i 1-layered)' %(len(npartition),layers_m[3],layers_m[2],layers_m[1])
    plt.title(title_s,{'size': '20'})
    plt.axis('off')
    plt.show()
# p1=p2=p3=0.1

# n=500
# G,J,FF,DD,JFD,edgeList = synthetic_three_level(n,p1,p2,p3,J_isolates=False,F_isolates= False, D_isolates= False)
# # print JFD.nodes()
# # print JFD.edges()
# # print F.nodes()
# # print F.edges()
# # print G.nodes()
# # print edgeList
# # print aaaa
# # print nx.isolates(G)
# # plot_graph(n,G,J,FF,DD,F,d1=2.,d2=3.,nodesize=100,withlabels=False,edgelist=edgeList,layout=True,b_alpha=0.5)
# plot_graph(n,G,J,FF,DD,JFD,d1=2.,d2=3.,nodesize=50,withlabels=False,edgelist=edgeList,layout=False,b_alpha=0.15)
# k=5
# n=10
# pp=[0.1,.1,.1,.1,.4]
# G, list_of_Graphs_final, Gagr, edgeList,nmap,mapping =synthetic_multi_level(k,n,p=pp,No_isolates=True)
# dic_of_edges=make_dict_of_edge_times(nmap,mapping,list_of_Graphs_final)

# print mapping
# print Gagr.edges()
# for i in list_of_Graphs_final:
#     print i,i.edges()
# for i in dic_of_edges:
#     print i,dic_of_edges[i]
# plot_graph_k(k,n,G, list_of_Graphs_final, Gagr, edgelist=edgeList)

# k=5
# pp=[0.21,.31,.21,.31,.4]
# # pp=[8,7,6,8,9]
# n=4
# m=6
# k=10
# n=4
# m=6
# pp=[0.19,.11,.11,.11,.14,.18,.12,.15,.13,.12]

# # pp=0.19
# G, list_of_Graphs_final, Gagr, edgeList,nmap,mapping =synthetic_multi_bipartite(k,n,m,p=pp)
# # plot_graph_k_n_m(k,n,m,G,list_of_Graphs_final, Gagr,colors_grey='bipartite', nodesize=50,withlabels=True,edgelist=edgeList,layout=True,b_alpha=0.5)
# plot_graph_k_nm(k,n+m,G,list_of_Graphs_final, Gagr,colors_grey='bipartite', nodesize=50,withlabels=False,edgelist=edgeList,layout=True,b_alpha=0.5)
# dic_of_edges,dict_of_edges_time=s3l.make_dict_of_edge_timesB(k,nmap,mapping,list_of_Graphs_final)

# k=10
# n=7
# m=6
# pp=[0.19,.11,.11,.11,.14,.18,.12,.15,.13,.12]
# G,ndls,timetoadd=create_synthetic3lgB(k,n,m,pp) 
# main_work(G,ndls,timetoadd)

# G,layer1,layer2,edgeList,partition=create_3comms_bipartite(n,m,p)
# broken_graph,broken_partition,npartition = create_node_3attri_graph(G,layer1,layer2,layer3,slayer1,slayer2)
# plot_graph(G,broken_graph,broken_partition,npartition,layer1,layer2,layer3,d1=1.4,d2=5.,d3=0.8,withlabels=False,nodesize=100,layout=False)
# print G,layer1,layer2
# print partition
# print G.edges()
# print G.nodes()