Example #1
def mention_nx_assortativity():
    os.chdir(IN_DIR)

    # Mention graph: weighted, directed edge list.
    MENT = nx.read_edgelist(f_in_graph_weights, create_using=nx.DiGraph(), data=(('weight', int),))
    print(len(MENT.nodes(data=True)))

    cnt = 0
    d = defaultdict(int)
    d_val = defaultdict(int)
    d1 = defaultdict(int)
    with open(f_in_user_sentiment) as f:
        for line in f:
            (uid, label, val) = line.split()
            uid = unicode(uid)
            d1[uid] = int(float(val) * 10000)
            if uid in MENT:
                d[uid] = int(float(val) * 10000)
                d_val[uid] = int(label)
            else:
                cnt += 1
    print "Number of nodes for which we have sentiment but which are not in the mention graph:", cnt

    cnt = 0
    for node in MENT.nodes():
        if node not in d1:
            cnt += 1
            MENT.remove_node(node)
    print "Number of nodes without a sentiment value, removed from the mention graph:", cnt

    # NetworkX 1.x signature: set_node_attributes(G, name, values)
    nx.set_node_attributes(MENT, 'sentiment', d)
    nx.set_node_attributes(MENT, 'sentiment_val', d_val)
    print "Final number of nodes in the graph:", len(MENT.nodes(data=True))
	print "Sentiment (by label) nominal numeric assortativity is %f " % nx.numeric_assortativity_coefficient(MENT, 'sentiment')
	print "Sentiment (by value) numeric assortativity is %f " % nx.numeric_assortativity_coefficient(MENT, 'sentiment_val')
def user_genre_similarity(G):
    """
    Return a pandas Series of numeric assortativity coefficients for G,
    keyed by genre (one entry per name in the module-level genres list).
    """
    Ser = pd.Series({
        genres[j]: nx.numeric_assortativity_coefficient(G, genres[j])
        for j in range(len(genres))
    })

    return Ser
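
A minimal usage sketch for user_genre_similarity (the genres list, graph, and attribute values are illustrative assumptions; NetworkX 2.x node-attribute access is assumed):

import networkx as nx
import pandas as pd

genres = ['rock', 'jazz']  # hypothetical genre names

G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 3)])
for n in G.nodes():
    G.nodes[n]['rock'] = n % 2   # alternating scores across the path
    G.nodes[n]['jazz'] = n // 2  # neighbouring nodes tend to share scores

print(user_genre_similarity(G))  # Series indexed by 'rock' and 'jazz'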
Example #4
def create_random_scalar_attributes2(G,scale):
    # Copy G into F (preserving edge attributes) and give every node a random
    # integer scalar attribute in [1, 99] plus a scaled numeric copy of it.
    F=nx.Graph()
    # print F.nodes(data=True)
    for ed in G.edges():
        attr_dic=G.edge[ed[0]][ed[1]]
        F.add_edge(ed[0],ed[1],attr_dict=attr_dic)
    for nd in G.nodes():
        attr_dic=G.node[nd]
        rand=random.choice(range(1,100))
        irand=int(rand*scale)
        F.add_node(nd,attr_dict=attr_dic,scalar_attribute=rand,scalar_attribute_numeric=irand)
    return F, nx.numeric_assortativity_coefficient(F,'scalar_attribute_numeric')
    def assortativity(self, boolean=True):
        """ 
        Computes assortativity according to network's attributes. 
        It also computes assortativity of these attributes in each community.
        """
        title = 'Assortativity.txt'
        assort_file = open(self.path + title, 'w')
        i = 0
        while i not in self.G.nodes():
            i += 1
        attributes = self.G.nodes(data=True)[i].keys()
        assorted = np.zeros(len(attributes))  # could be made bigger so that it also includes communities

        for i, attr in enumerate(attributes):
            assort_file.write('assortativity according to ' + attr + ' is: ' +
                              '\n')
            assorted_attr = nx.numeric_assortativity_coefficient(self.G, attr)
            assorted[i] = assorted_attr
            assort_file.write(str(assorted_attr) + '\n\n')

        if boolean:  # it does not compute in graph_related
            community_members = self.modularity_communities  #XXX
            # community_members = self.modularity_communitiesx

            for num, com in enumerate(community_members):
                assort_file.write('community #{}:'.format(num) + '\n')
                for attr in attributes:
                    assort_file.write('assortativity according to ' + attr +
                                      ' is: ' + '\n')
                    assort_file.write(
                        str(
                            nx.numeric_assortativity_coefficient(
                                self.G, attr, nodes=com)) + '\n' + '\n')

        assort_file.close()
        return assorted, attributes
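
For readers without the surrounding class, a stripped-down sketch of the same per-attribute loop as a standalone function (the graph, attribute names, and output path are illustrative assumptions, not part of the original class):

import networkx as nx
import numpy as np

def assortativity_report(G, attributes, path='Assortativity.txt'):
    # Write the numeric assortativity of each listed node attribute to a text
    # file and return the coefficients as an array, mirroring the method above.
    assorted = np.zeros(len(attributes))
    with open(path, 'w') as out:
        for i, attr in enumerate(attributes):
            assorted[i] = nx.numeric_assortativity_coefficient(G, attr)
            out.write('assortativity according to %s is:\n%s\n\n' % (attr, assorted[i]))
    return assorted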
Example #6
def create_scalar_attributes_0_1(G,source):
    # Copy G into F; the source node gets scalar attribute 1, all other nodes get 0.
    F=nx.Graph()
    for ed in G.edges():
        attr_dic=G.edge[ed[0]][ed[1]]
        F.add_edge(ed[0],ed[1],attr_dict=attr_dic)
    for nd in G.nodes():
        attr_dic=G.node[nd]
        if nd==source:
            rand=1.
            irand=1
            F.add_node(nd,attr_dict=attr_dic,scalar_attribute=rand,scalar_attribute_numeric=irand)
        else:
            rand=0.
            irand=0
            F.add_node(nd,attr_dict=attr_dic,scalar_attribute=rand,scalar_attribute_numeric=irand)
    return F, nx.numeric_assortativity_coefficient(F,'scalar_attribute_numeric')
def graph_seq_similarity(Gs):
    """
    Return a DataFrame indexed by the starting and ending date of each graph
    in Gs, with one column per genre. The per-graph results are built as
    single-row DataFrames and concatenated by row.
    """
    Simframe = []

    for i in range(len(Gs)):

        # The keys become the column names
        Simframe.append(
            pd.DataFrame(
                {
                    genres[j]: nx.numeric_assortativity_coefficient(
                        Gs[i], genres[j])
                    for j in range(len(genres))
                },
                index=[Gs.index.values[i]]))

    return pd.concat(Simframe)
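
A usage sketch for graph_seq_similarity (the date labels, random graphs, and genre attributes below are illustrative assumptions; Gs is expected to be a pandas Series of graphs whose index carries the date ranges, and the Gs[i] lookup inside the function relies on positional fallback of Series indexing):

import networkx as nx
import pandas as pd

genres = ['rock', 'jazz']  # hypothetical genre names

def toy_graph(seed):
    g = nx.gnp_random_graph(20, 0.2, seed=seed)
    for n in g.nodes():
        for k, genre in enumerate(genres):
            g.nodes[n][genre] = (n + seed + k) % 3  # dummy integer genre scores
    return g

Gs = pd.Series([toy_graph(1), toy_graph(2)],
               index=['2020-01-01/2020-01-07', '2020-01-08/2020-01-14'])

print(graph_seq_similarity(Gs))  # one row per date range, one column per genre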
Example #9
 def assortativity(self, G, attr_assorttype):
     tab = Table()
     tdata = []
     assorttypes = {
         'attribute':
         lambda x, y: nx.attribute_assortativity_coefficient(x, y),
         'numeric': lambda x, y: nx.numeric_assortativity_coefficient(x, y)
     }
     rescale_func = {
         'attribute':
         lambda x, xsetsorted: x,
         'numeric':
         lambda x, setsorted: self.classify(x, setsorted, 100)
         if max(setsorted) > 1000 else x
     }
     for attr, assorttype in attr_assorttype.iteritems():
         start = timeit.default_timer()
         node_attr = nx.get_node_attributes(G, attr)
         setsorted = set(node_attr.values())
         if len(setsorted) > 1:
             if max(setsorted) > 1000:
                 node_attr = self.classify(node_attr, 100)
             G_assort = nx.Graph(G.subgraph(node_attr.keys()))
             nx.set_node_attributes(G_assort, attr, node_attr)
             coef = assorttypes[assorttype](G_assort, attr)
         else:
             coef = np.nan
         stop = timeit.default_timer()
         tdata.append((assorttype, attr, coef, (stop - start)))
     start = timeit.default_timer()
     coef = nx.degree_pearson_correlation_coefficient(G)
     deg = set(nx.degree(G).values())
     stop = timeit.default_timer()
     tdata.append(('degree', '', coef, (stop - start)))
     tab.from_tuples(tdata,
                     columns=['Type', 'Attribute', 'Coef.', 'Time (sec)'])
     tab.sort_values(by=['Coef.'], ascending=False)
     tab.display()
Example #11
# In[20]:

# M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003
nx.degree_assortativity_coefficient(G)  # Compute the degree assortativity of the graph.

# In[21]:

Ge = nx.Graph()
Ge.add_nodes_from([0, 1], size=2)
Ge.add_nodes_from([2, 3], size=3)
Ge.add_edges_from([(0, 1), (2, 3)])
node_size = [list(Ge.nodes[i].values())[0] * 1000 for i in Ge.nodes()]
nx.draw(Ge, with_labels=True, node_size=node_size)

print(nx.numeric_assortativity_coefficient(Ge, 'size'))  # both edges join nodes of equal size, so this prints 1.0

# In[53]:

# plot degree correlation
from collections import defaultdict
import numpy as np

l = defaultdict(list)
g = nx.karate_club_graph()

for i in g.nodes():
    k = []
    for j in g.neighbors(i):
        k.append(g.degree(j))
    l[g.degree(i)].append(np.mean(k))
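
The cell above collects, for each node, the mean degree of its neighbours keyed by the node's own degree, but stops before the plot promised in the comment. A minimal completion sketch (the aggregation and styling are my own choices, not from the original notebook):

import matplotlib.pyplot as plt

# One k_nn value per degree k: average the per-node means collected in l.
ks = sorted(l.keys())
knn = [np.mean(l[k]) for k in ks]

plt.plot(ks, knn, 'o-')
plt.xlabel('degree k')
plt.ylabel('average neighbour degree k_nn(k)')
plt.show()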
 def test_numeric_assortativity_mixed(self):
     r = nx.numeric_assortativity_coefficient(self.M, "margin")
     np.testing.assert_almost_equal(r, 0.4340, decimal=4)
 def test_numeric_assortativity_float(self):
     r = nx.numeric_assortativity_coefficient(self.F, "margin")
     np.testing.assert_almost_equal(r, -0.1429, decimal=4)
 def test_numeric_assortativity_negative(self):
     r = nx.numeric_assortativity_coefficient(self.N, "margin")
     np.testing.assert_almost_equal(r, -0.2903, decimal=4)
def polinfluence_sim(nodes,p,sa,iterations,scale=1000):
    while  True:
        # G=nx.connected_watts_strogatz_graph(25, 2, 0.8, tries=100)
        G=nx.erdos_renyi_graph(nodes,p)
        if nx.is_connected(G):
            break
    G.remove_nodes_from(nx.isolates(G))
    # col=y
    pos=nx.spring_layout(G)

    ##nx.draw_networkx(g,pos=pos, node_color=col,cmap=plot.cm.Reds)
    # scale=1000
    F,asoc=utilat.create_random_scalar_attributes(G,scale)
    col=[F.node[i]['scalar_attribute'] for i in F.nodes()]

    fig = plt.figure(figsize=(17,17))


    sstt="Initial distribution of scalar attributes over graph nodes" #\n (diffusion source at node %s with initial attribute =%f)" %(F.nodes()[0],starting_value_of_zero_node)
    plt.subplot(3,2,1).set_title(sstt)
    # plt.set_cmap('cool')
    nx.draw_networkx(G,pos=pos, node_color=col,vmin=0.,vmax=1.)
    plt.axis('equal')
    plt.axis('off')
    
    # plt.figure(2)
    sstt = "Time variation of scalar attributes over graph nodes" #%F.nodes()[0]#,starting_value_of_zero_node)
    plt.subplot(3,2,5).set_title(sstt)
    plt.ylim(-0.01,1.01)
    # for i in F.nodes(data=True):
        # print i
    # print nx.numeric_assortativity_coefficient(F,'scalar_attribute_numeric')
    iterat=[]

    assort=[]
    y=[F.node[i]['scalar_attribute'] for i in F.nodes()]
    ckck=4
    kckc=2
    for ii in range(iterations):
        # sa=0.05
        checkin=True
        for nd in F.nodes():
            # sa=1-(1./nx.degree(F,nd))
            uu=0
            nei=nx.neighbors(F,nd)
            # print nei
            for nnei in nei:
                uu+=F.node[nnei]['scalar_attribute']
            X=F.node[nd]['scalar_attribute']
            Xnei=uu/len(nei)

            if X < Xnei:
                uX=sa*max(2.*X - Xnei,0.)+(1-sa)*X
            elif X > Xnei:
                uX=sa*min(2.*X - Xnei,1.)+(1-sa)*X
            else:
                uX=X  # no change when a node already matches its neighbourhood average

            # uX=(sa*uu/len(nei))+(1-sa)*F.node[nd]['scalar_attribute']
            insau=int(uX*scale)
            F.add_node(nd,scalar_attribute=uX,scalar_attribute_numeric=insau)

            
        # for i in F.nodes(data=True):
        #   print i
        # Checking for attributes equality
        y1=y
        y=[F.node[i]['scalar_attribute'] for i in F.nodes()]
        if ii ==ckck and ii<10:
            # col=['%.5f' %yy for yy in y]
            col=[F.node[i]['scalar_attribute'] for i in F.nodes()]
            sstt="Distribution of scalar attributes over graph nodes at %i iterations" %(ii+1)

            plt.subplot(3,2,kckc).set_title(sstt)
            nx.draw_networkx(G,pos=pos, node_color=col,vmin=0.,vmax=1.)
            plt.axis('equal')
            plt.axis('off')
            ckck+=5
            kckc+=1
            plt.subplot(3,2,5)
        # for yy in it.combinations(y,2):
        #   checkin=checkin and yy[0] -yy[1] <(1./scale)
        # for yy in range(len(y1)):
        #   # print y[yy]-y1[yy]<(1./scale)
        #   checkin=checkin and y[yy]-y1[yy]<(0.1/(scale))
        #   # print checkin
        # if checkin:
        #   break
        asss=nx.numeric_assortativity_coefficient(F,'scalar_attribute_numeric')
        # if np.isnan(asss):
        #     asss=1.
        # print 'Iteration %i ==> %f' %(ii,asss),y 
        # print type(asss), asss<-1,asss>1
        iterat.append(ii)

        assort.append(asss)

        # print nx.numeric_assortativity_coefficient(F,'scalar_attribute_numeric')
        # y=[F.node[i]['scalar_attribute'] for i in F.nodes()]
        plt.plot(F.nodes(),y)
        # if asss==1. :
        #   break
    plt.plot(F.nodes(),y,linewidth=3.)
    sstt= "Time variation of scalar attribute assortativity coefficient"

    plt.subplot(3,2,6).set_title(sstt)

    # plt.figure(3)

    plt.plot(iterat,assort)
    plt.ylim(-1.1,1.1)
    # plt.figure(3)
    # plt.plot(F.nodes(),y)
    # col=['%.5f' %yy for yy in y]
    col=[F.node[i]['scalar_attribute'] for i in F.nodes()]
    sstt="Distribution of scalar attributes over graph nodes at %i iterations\n (polarized attributes = (%.2f, %.2f))" %(iterations,min(col),max(col))#col[0])
    plt.subplot(3,2,4).set_title(sstt)

    # plt.figure(4)

    
    # print col
    # print [F.node[i]['scalar_attribute_numeric'] for i in F.nodes()]
    # pos=nx.spring_layout(G)
    ##nx.draw_networkx(g,pos=pos, node_color=col,cmap=plot.cm.Reds)
    nx.draw_networkx(G,pos=pos, node_color=col,vmin=0.,vmax=1.)
    plt.axis('equal')
    plt.axis('off')

    plt.show()







# influence_sim(25,0.2,.1,500)
# infdif_sim(25,.2,0.,.1,500)
# polinf_sim(25,.2,500)
# sidif_sim(25,.2,1.,500)
# polinfluence_sim(25,0.2,.041,500)