def closeness_removal(g, recalculate=False):
    """
    Performs robustness analysis based on closeness centrality,
    on the network specified by infile using sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with fraction of nodes removed, a list with the corresponding sizes of
    the largest component of the network, and the overall vulnerability
    of the network.
    """

    m = nx.closeness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []

    dimension = fd.fractal_dimension(g, iterations=100, debug=False)
    n = len(g.nodes())
    x.append(0)
    y.append(dimension)

    for i in range(1, n-1):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = nx.closeness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        dimension = fd.fractal_dimension(g, iterations=100, debug=False)
        x.append(i * 1. / n)
        y.append(dimension)

    return x, y
def compute_static_graph_statistics(G,start_time,end_time):
    verts = G.vertices
    n = len(verts)
    m = float(end_time - start_time)
    # per-node dictionaries for [degree, closeness, betweenness]
    agg_statistics = [dict.fromkeys(verts,0),dict.fromkeys(verts,0),dict.fromkeys(verts,0)]
    avg_statistics = [dict.fromkeys(verts,0),dict.fromkeys(verts,0),dict.fromkeys(verts,0)]

    aggregated_graph = nx.Graph()
    aggregated_graph.add_nodes_from(verts)
    start_time = max(1,start_time)
    for t in xrange(start_time,end_time+1):
        aggregated_graph.add_edges_from(G.snapshots[t].edges_iter())
         
        dc = G.snapshots[t].degree()
        cc = nx.closeness_centrality(G.snapshots[t])
        bc = nx.betweenness_centrality(G.snapshots[t])
        for v in verts:
            avg_statistics[0][v] += dc[v]/(n-1.0)
            avg_statistics[1][v] += cc[v]
            avg_statistics[2][v] += bc[v]
    for v in verts:
        avg_statistics[0][v] = avg_statistics[0][v]/m
        avg_statistics[1][v] = avg_statistics[1][v]/m
        avg_statistics[2][v] = avg_statistics[2][v]/m
    
    dc = nx.degree_centrality(aggregated_graph)
    cc = nx.closeness_centrality(aggregated_graph)
    bc = nx.betweenness_centrality(aggregated_graph)
    for v in verts:
        agg_statistics[0][v] = dc[v]
        agg_statistics[1][v] = cc[v]
        agg_statistics[2][v] = bc[v]
    return (agg_statistics, avg_statistics)
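# Hedged usage sketch for compute_static_graph_statistics. The DynGraph container
# below (with `vertices` and `snapshots` attributes) is invented here purely for
# illustration; the original expects whatever dynamic-graph object its project
# defines. Like the function itself, this assumes Python 2 and NetworkX 1.x.
from collections import namedtuple
DynGraph = namedtuple("DynGraph", ["vertices", "snapshots"])
demo_snapshots = {t: nx.gnm_random_graph(6, 8, seed=t) for t in range(1, 4)}
demo_dyn = DynGraph(vertices=range(6), snapshots=demo_snapshots)
demo_agg, demo_avg = compute_static_graph_statistics(demo_dyn, 1, 3)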
def closeness_component(seed_num, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None:
    return []

  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  components = list(nx.connected_components(G))
  components = filter(lambda x: len(x) > 0.1 * len(G), components)
  total_size = sum(map(lambda x: len(x), components))
  total_nodes = 0
  rtn = []
  for comp in components[1:]:
    num_nodes = int(float(len(comp)) / total_size * seed_num)
    component = G.subgraph(list(comp))
    clse_cent = nx.closeness_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += map(lambda (x, y): x, clse_cent)
    total_nodes += num_nodes

  num_nodes = seed_num - total_nodes
  component = G.subgraph(list(components[0]))
  clse_cent = nx.closeness_centrality(component)
  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(num_nodes)
  rtn += map(lambda (x, y): x, clse_cent)
  return rtn
Example #4
def closeness(infile, recalculate = False):
    """
    Performs robustness analysis based on closeness centrality,  
    on the network specified by infile using sequential (recalculate = True) 
    or simultaneous (recalculate = False) approach. Returns a list 
    with fraction of nodes removed, a list with the corresponding sizes of 
    the largest component of the network, and the overall vulnerability 
    of the network.
    """

    g = networkx.read_gml(infile)
    m = networkx.closeness_centrality(g)
    l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key = len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    R = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.closeness_centrality(g)
            l = sorted(m.items(), key = operator.itemgetter(1), 
                       reverse = True)
        largest_component = max(networkx.connected_components(g), key = len)
        x.append(i * 1. / n)
        R += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, 0.5 - R / n
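# Hedged usage sketch for the closeness() routine above; the GML path and the
# matplotlib plotting step are illustrative assumptions, not part of the original.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fractions, sizes, vulnerability = closeness("network.gml", recalculate=False)
    print("overall vulnerability: {}".format(vulnerability))
    plt.plot(fractions, sizes, marker=".")
    plt.xlabel("Fraction of nodes removed")
    plt.ylabel("Relative size of largest component")
    plt.savefig("closeness_robustness.png")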
Example #5
def closeness_fracture(infile, outfile, fraction, recalculate = False):
    """
    Removes given fraction of nodes from infile network in reverse order of 
    closeness centrality (with or without recalculation of centrality values 
    after each node removal) and saves the network in outfile.
    """

    g = networkx.read_gml(infile)
    m = networkx.closeness_centrality(g)
    l = sorted(m.items(), key = operator.itemgetter(1), reverse = True)
    largest_component = max(networkx.connected_components(g), key = len)
    n = len(g.nodes())
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.closeness_centrality(g)
            l = sorted(m.items(), key = operator.itemgetter(1), 
                       reverse = True)
        largest_component = max(networkx.connected_components(g), key = len)
        if i * 1. / n >= fraction:
            break
    components = networkx.connected_components(g)
    component_id = 1
    for component in components:
        for node in component:
            g.node[node]["component"] = component_id
        component_id += 1
    networkx.write_gml(g, outfile)
 def test_digraph(self):
     G = nx.path_graph(3, create_using=nx.DiGraph())
     c = nx.closeness_centrality(G)
     cr = nx.closeness_centrality(G.reverse())
     d = {0: 0.0, 1: 0.500, 2: 0.667}
     dr = {0: 0.667, 1: 0.500, 2: 0.0}
     for n in sorted(self.P3):
         assert_almost_equal(c[n], d[n], places=3)
         assert_almost_equal(cr[n], dr[n], places=3)
def closeness_centrality_distribution(G, return_dictionary=False):
    """Return a distribution of unweighted closeness centralities, as used in
    Borges, Coppersmith, Meyer, and Priebe 2011.
    If return_dictionary is specified, we return a dictionary indexed by
    vertex name, rather than just the values (as returned by default).
    """
    if return_dictionary:
        return nx.closeness_centrality(G)
    else:
        return nx.closeness_centrality(G).values()
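# Hedged usage sketch: Zachary's karate club graph is used purely as an
# illustrative input here, not something referenced by the original snippet.
demo_graph = nx.karate_club_graph()
demo_values = list(closeness_centrality_distribution(demo_graph))
print("mean closeness: {}".format(sum(demo_values) / len(demo_values)))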
 def test_wf_improved(self):
     G = nx.union(self.P4, nx.path_graph([4, 5, 6]))
     c = nx.closeness_centrality(G)
     cwf = nx.closeness_centrality(G, wf_improved=False)
     res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25,
            4: 0.222, 5: 0.333, 6: 0.222}
     wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5,
               4: 0.667, 5: 1.0, 6: 0.667}
     for n in G:
         assert_almost_equal(c[n], res[n], places=3)
         assert_almost_equal(cwf[n], wf_res[n], places=3)
def closeness_apl(g, recalculate=False):
    """
    Performs robustness analysis based on closeness centrality,
    on the network specified by infile using sequential (recalculate = True)
    or simultaneous (recalculate = False) approach. Returns a list
    with fraction of nodes removed, a list with the corresponding sizes of
    the largest component of the network, and the overall vulnerability
    of the network.
    """

    m = networkx.closeness_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []

    average_path_length = 0.0
    number_of_components = 0
    n = len(g.nodes())

    for sg in networkx.connected_component_subgraphs(g):
        average_path_length += networkx.average_shortest_path_length(sg)
        number_of_components += 1

    average_path_length = average_path_length / number_of_components
    initial_apl = average_path_length

    x.append(0)
    y.append(average_path_length * 1. / initial_apl)

    r = 0.0
    for i in range(1, n):
        g.remove_node(l.pop(0)[0])
        if recalculate:
            m = networkx.closeness_centrality(g)
            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)

        average_path_length = 0.0
        number_of_components = 0

        for sg in networkx.connected_component_subgraphs(g):
            if len(sg.nodes()) > 1:
                average_path_length += networkx.average_shortest_path_length(sg)
            number_of_components += 1

        average_path_length = average_path_length / number_of_components

        x.append(i * 1. / n)
        r += average_path_length * 1. / initial_apl
        y.append(average_path_length * 1. / initial_apl)
    return x, y, r / initial_apl
Example #10
	def closeness_centrality(self, withme=False, node=None, average=False):
		if node==None:
			if withme:
				my_dict = nx.closeness_centrality(self.mynet)
				new = {}
				new2={}
				for i in my_dict:
					new[self.id_to_name(i)] = my_dict[i]
					new2[i] = my_dict[i]
				if average:
					print "The average is " + str(round(sum(new.values())/float(len(new.values())),4))
				else:
					for i,j in new.items():
						print i, round(j,4)
					return new2
			else:
				my_dict = nx.closeness_centrality(self.no_ego_net)

				new = {}
				new2={}
				for i in my_dict:
					new[self.id_to_name(i)] = my_dict[i]
					new2[i] = my_dict[i]
				if average:
					print "The average is " + str(round(sum(new.values())/float(len(new.values())),4))
				else:
					for i,j in new.items():
						print i, round(j,4)
					return new2
		else:
			if withme:
				my_dict = nx.closeness_centrality(self.mynet)
				try:
					print "The coefficient for node "+str(node)+ "is "+ str(round(my_dict[node],4))
				except:
					try:
						print "The coefficient for node "+str(node)+ "is "+ str(my_dict[[self.name_to_id(node)]])
					except:
						print "Invalid node name"
			else:
				my_dict = nx.closeness_centrality(self.no_ego_net)
				try:
					print "The coefficient for node "+str(node)+ "is "+ str(round(my_dict[node],4))
				except:
					try:
						print "The coefficient for node "+str(node)+ "is "+ str(round(my_dict[[self.name_to_id(node)]],4))
					except:
						print "Invalid node name"
def analyze_graph(G):    
    #centralities and node metrics
    out_degrees = G.out_degree()
    in_degrees = G.in_degree()
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    avg_neighbour_degree = nx.average_neighbor_degree(G)
    redundancy = bipartite.node_redundancy(G)
    load = nx.load_centrality(G)
    hits = nx.hits(G)[0]  # nx.hits returns (hubs, authorities); keep the hub scores
    vitality = nx.closeness_vitality(G)
    
    for name in G.nodes():
        G.node[name]['out_degree'] = out_degrees[name]
        G.node[name]['in_degree'] = in_degrees[name]
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['avg-neigh-degree'] = avg_neighbour_degree[name]
        G.node[name]['redundancy'] = redundancy[name]
        G.node[name]['load'] = load[name]
        G.node[name]['hits'] = hits[name]
        G.node[name]['vitality'] = vitality[name]
        
    #communities
    partitions = community.best_partition(G)
    for member, c in partitions.items():
        G.node[member]['community'] = c   
    
    return G
Example #12
    def test_networkx_roundtrip(self):
        print('\n---------- NetworkX Data Roundtrip Test Start -----------\n')

        g = nx.newman_watts_strogatz_graph(100, 3, 0.5)
        nodes = g.nodes()
        edges = g.edges()

        # Add some attributes
        g.graph['name'] = 'original'
        g.graph['density'] = nx.density(g)

        nx.set_node_attributes(g, 'betweenness', nx.betweenness_centrality(g))
        nx.set_node_attributes(g, 'degree', nx.degree(g))
        nx.set_node_attributes(g, 'closeness', nx.closeness_centrality(g))

        nx.set_edge_attributes(g, 'eb', nx.edge_betweenness(g))

        cyjs1 = util.from_networkx(g)
        g2 = util.to_networkx(cyjs1)

        self.assertEqual(len(g2.nodes()), len(nodes))
        self.assertEqual(len(g2.edges()), len(edges))

        edge_set = set(list(map(lambda x: (int(x[0]), int(x[1])), g2.edges())))
        self.assertEqual(0, len(edge_set.difference(set(edges))))

        node_original = g.node[1]
        node_generated = g2.node['1']

        print(node_original)
        print(node_generated)

        self.assertEqual(node_original['degree'], node_generated['degree'])
        self.assertEqual(node_original['betweenness'], node_generated['betweenness'])
        self.assertEqual(node_original['closeness'], node_generated['closeness'])
def generate_seeds(num_players, num_seeds, G):

	# Initialize seed array to zeros
	seeds = np.zeros(num_seeds, dtype=np.int)

	neighbors = nx.to_dict_of_lists(G).values()

	m = nx.closeness_centrality(G)
	centralities = m.values()

	sum_centralities = [(None, None)] * len(neighbors)  # tuple of (node_id, summed closeness)

	# Assign sum of centralities to each node in the graph (over all neighbors)
	for i in range(len(neighbors)):
		sum = 0.0
		this_nodes_neighbors = neighbors[i]
		for j in range(len(neighbors[i])):
			sum = sum + centralities[this_nodes_neighbors[j]]
		sum_centralities[i] = ( i, sum )
	sorted_centralities = sorted(sum_centralities, key=itemgetter(1), reverse=True)
	for i in range(num_seeds):
		seeds[i] = sorted_centralities[i][0]

	return seeds
Example #14
def closeness_centrality(graph, outfile, records=10):
    """ Perform a closeness centrality analysis on graph """
    ranking = nx.closeness_centrality(graph)
    ordering = sorted(ranking.items(), key=operator.itemgetter(1), reverse=True)[:records]
    print("Employee,Degree Centrality", file=outfile)
    for employee, rank in ordering:
      print("{},{}".format(employee, rank), file=outfile)
Example #15
    def test_networkx_roundtrip(self):
        print("\n---------- NetworkX Data Roundtrip Test Start -----------\n")

        g = nx.newman_watts_strogatz_graph(100, 3, 0.5)
        nodes = g.nodes()
        edges = g.edges()

        # Add some attributes
        g.graph["name"] = "original"
        g.graph["density"] = nx.density(g)

        nx.set_node_attributes(g, "betweenness", nx.betweenness_centrality(g))
        nx.set_node_attributes(g, "degree", nx.degree(g))
        nx.set_node_attributes(g, "closeness", nx.closeness_centrality(g))

        nx.set_edge_attributes(g, "eb", nx.edge_betweenness(g))

        cyjs1 = util.from_networkx(g)
        g2 = util.to_networkx(cyjs1)

        self.assertEqual(len(g2.nodes()), len(nodes))
        self.assertEqual(len(g2.edges()), len(edges))

        edge_set = set(list(map(lambda x: (int(x[0]), int(x[1])), g2.edges())))
        self.assertEqual(0, len(edge_set.difference(set(edges))))

        node_original = g.node[1]
        node_generated = g2.node["1"]

        print(node_original)
        print(node_generated)

        self.assertEqual(node_original["degree"], node_generated["degree"])
        self.assertEqual(node_original["betweenness"], node_generated["betweenness"])
        self.assertEqual(node_original["closeness"], node_generated["closeness"])
Example #16
def closeness_centrality_report(graph, n):
    """ reports on the top n most central individuals on the graph """
    pr = nx.closeness_centrality(graph)
    nodes = sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:n]
    print("closeness centrality - top {} individuals".format(n))
    for node, score in nodes:
        print("  {:30}:\t{}".format(node, score))
Example #17
def build_graph():
    pair_list = TwitterUser.get_top_100_pair()
    DG = nx.DiGraph()
    DG.add_edges_from([(foer, twitter_user) for twitter_user, foer in
        pair_list])
    betweenness = nx.betweenness_centrality(DG)
    closeness = nx.closeness_centrality(DG)
    edge_betweenness = nx.edge_betweenness(DG)
    clustering_co = nx.clustering(nx.Graph(DG))
    page_rank = nx.pagerank(DG)
    for twitter_id in DG.nodes():
        t = TwitterUser.get_by_id(twitter_id)
        node = DG.node[twitter_id]
        node['user_id'] = t.user_id
        node['label'] = t.scrn_name
        node['follower_count'] = t.foer_cnt
        node['friend_count'] = t.friend_cnt
        node['status_count'] = t.status_cnt
        node['location'] = t.location
        node['verified'] = t.verified
        node['twitter_age'] = (date.today() - t.created_at).days
        node['daily_tweet'] = t.status_cnt*1.0/node['twitter_age']
        node['indegree'] = len([(id, foer) for id, foer 
            in pair_list if id == twitter_id])
        node['outdegree'] = len([(id, foer) for id, foer 
            in pair_list if foer == twitter_id])
        node['cluster'] = clustering_co[twitter_id]
        node['betweenness'] = betweenness[twitter_id]
        node['closeness'] = closeness[twitter_id]
        node['page_rank'] = page_rank[twitter_id]
    for out_n, in_n in DG.edges():
        DG[out_n][in_n]['edge_betweenness'] = edge_betweenness[(out_n,in_n)]

    return DG
 def test_p3_closeness(self):
     c = nx.closeness_centrality(self.P3)
     d = {0: 0.667,
          1: 1.000,
          2: 0.667}
     for n in sorted(self.P3):
         assert_almost_equal(c[n], d[n], places=3)
Example #19
def relevant_stats(G):
	cloC = nx.closeness_centrality(G, distance = 'distance')
	betC = nx.betweenness_centrality(G, weight = 'distance')
	katC = nx.katz_centrality(G)
	eigC = nx.eigenvector_centrality(G)

	return cloC, betC, katC, eigC
Example #20
def centrality_scores(vote_matrix, season_graph):
    deg = nx.degree(season_graph)
    deg = {k: round(v,1) for k,v in deg.iteritems()}

    close = nx.closeness_centrality(season_graph)
    close = {k: round(v,3) for k,v in close.iteritems()}

    btw = nx.betweenness_centrality(season_graph)
    btw = {k: round(v,3) for k,v in btw.iteritems()}

    eig = nx.eigenvector_centrality_numpy(season_graph)
    eig = {k: round(v,3) for k,v in eig.iteritems()}
    
    page = nx.pagerank(season_graph)
    page = {k: round(v,3) for k,v in page.iteritems()}

    # Add contestant placement (rank)
    order = list(vote_matrix.index)
    place_num = list(range(len(order)))
    place = {order[i]:i+1 for i in place_num}
    
    names = season_graph.nodes()

    # Build a table with centralities 
    table=[[name, deg[name], close[name], btw[name], eig[name], page[name], place[name]] for name in names]

    # Convert table to pandas df
    headers = ['name', 'deg', 'close', 'btw', 'eig', 'page', 'place']
    df = pd.DataFrame(table, columns=headers)
    df = df.sort_values(['page', 'eig', 'deg'], ascending=False)
    
    return df
def computeLeague(libSNA, session):
    d = nx.degree(libSNA.graph)
    c = nx.closeness_centrality(libSNA.graph)
    b = nx.betweenness_centrality(libSNA.graph)
    
    ds = sorted_map(d)
    cs = sorted_map(c)
    bs = sorted_map(b)
    
    weights = [.50, .30, .20]
    
    names1 = [x[0] for x in ds[:10]]
    names2 = [x[0] for x in cs[:10]]
    names3 = [x[0] for x in bs[:10]]
    
    names = list(set(names1) | set(names2) | set(names3))
    names = sorted(names, key = lambda name: (float(d[name])/ds[0][1])*weights[0] + (float(c[name])/cs[0][1])*weights[1] + (float(b[name])/bs[0][1])*weights[2], reverse = True)
    
    result = fbutils.fql(
        "SELECT uid, name FROM user WHERE uid IN ( " \
        "SELECT uid2 FROM friend WHERE uid1 = me() )",
        session['access_token'])
    
    nodes = {}
    for node in result:
        nodes[str(node['uid'])] = node['name']
    
    return [[name, nodes[name], str(d[name]), str(c[name]), str(b[name])] for name in names]
def closeness_neighbors(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.closeness_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "closeness neighbors"

  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(SURROUND_TOP)
  nodes = map(lambda (x, y): x, clse_cent)

  current_seed = 0
  rtn = []
  while current_seed < seed_num:
    current_node = nodes[current_seed % len(nodes)]
    current_neighbors = G.neighbors(current_node)
    rtn += random.sample(set(current_neighbors) - set(rtn) - set(nodes), 1)
    current_seed += 1

  return rtn
def generate_seeds(num_players, num_seeds, G):
	# Initialize see array to zeros
	seeds = np.zeros(num_seeds, dtype=np.int)
	neighbors = nx.to_dict_of_lists(G).values()
	m = nx.closeness_centrality(G)
	
	centralities = m.values()
	degrees = np.zeros(len(neighbors), dtype=np.int)  # tuple of (node_id, degree)
	for i in range(len(neighbors)):
		degrees[i] = len(neighbors[i])
		
	scores = [(None, None)] * len(neighbors)
	
	degree_max = max(degrees)
	cent_max = max(centralities)
	
	for i in range(len(neighbors)):
		norm_degree = float(degrees[i]) / degree_max
		norm_cent = float(centralities[i]) / cent_max
		scores[i] = (i, norm_degree * DEGREE_WEIGHT + norm_cent * CENT_WEIGHT)

	sorted_scores = sorted(scores, key=itemgetter(1), reverse=True)
	for i in range(num_seeds):
		seeds[i] = sorted_scores[i][0]

	return seeds
Example #24
def run_main(file):

    NumberOfStations=465
    print file
    adjmatrix = np.loadtxt(file,delimiter=' ',dtype=np.dtype('int32'))

    # for i in range (0,NumberOfStations):
    #     if(adjmatrix[i,i]==1):
    #         print "posicion: ["+str(i)+","+str(i)+"]"


    g = nx.from_numpy_matrix(adjmatrix, create_using = nx.MultiGraph())
    degree = g.degree()
    density = nx.density(g)
    degree_centrality = nx.degree_centrality(g)
    closeness_centrality = nx.closeness_centrality(g)
    betweenness_centrality = nx.betweenness_centrality(g)

    print degree
    print density
    print degree_centrality
    print closeness_centrality
    print betweenness_centrality
    #nx.draw(g)
#    np.savetxt(OutputFile, Matrix, delimiter=' ',newline='\n',fmt='%i')
def get_sna(path):
    sna_data = {}
    print 'Building relations graph'
    G = nx.read_gexf(path)
    print 'Nodes:', len(G.nodes())
    print 'Edges:', len(G.edges())
        
    print 'Calculating centralities:'
    print '    -degrees'
    degrees = G.degree()    
    for c in degrees:
        sna_data[c] = { 'degree':degrees[c],
                            'betweenness':0,
                            'closeness':0,
                            'eigenvector':0}
        
    print '    -betweenness'
    betweenness = nx.betweenness_centrality(G)
    for c in betweenness:
        sna_data[c]['betweenness'] = betweenness[c]
        
    print '    -closeness'
    closeness = nx.closeness_centrality(G)
    for c in closeness:
        sna_data[c]['closeness'] = closeness[c]
        
    print '    -eigenvector'
    eigenvector = nx.eigenvector_centrality_numpy(G)
    for c in eigenvector:
        sna_data[c]['eigenvector'] = eigenvector[c]
        
    return sna_data
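# Hedged usage sketch: "relations.gexf" is an illustrative path, and the ranking
# step is added only to show one way the returned dictionary might be used.
sna_stats = get_sna("relations.gexf")
top_brokers = sorted(sna_stats.items(), key=lambda kv: kv[1]['betweenness'], reverse=True)[:5]
for node_id, metrics in top_brokers:
    print node_id, metrics['betweenness']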
def findVCCEdge(G):

	node1 = 0
	cen1 = float("inf")
	node2 = 0
	cen2 = float("inf")
	i = 0

	c = nx.closeness_centrality(G)

	cut = MinCut(G)

	while(True):
		for n in cut[0]:
			if((float(c[n]) < cen1) and (c[n] > 0)):
				cen1 = c[n]
				node1 = n

		for n in cut[1]:
			if((float(c[n]) < cen2) and (c[n] > 0)):
				cen2 = c[n]
				node2 = n
		
		edge = (node1 ,node2)

		if not(G.has_edge(*edge)):
			break
		else:
			cut[i%2].remove(edge[i%2])
			cen1 = float("inf")
			cen2 = float("inf")

		i+=1

	return edge
def plot_closeness_dist (graph, path):
    """Plot distribution of closeness centrality of the graph and save the figure
       at the given path. On X-axis we have closeness centrality values and on
       Y-axis we have percentage of the nodes that have that closeness value"""

    N = float(graph.order())
    node_to_closeness = nx.closeness_centrality(graph)
    closeness_to_percent = {}

    # calculate percentages of nodes with certain closeness value
    for node in node_to_closeness:
        closeness_to_percent[node_to_closeness[node]] = 1 + \
                closeness_to_percent.get(node_to_closeness[node], 0)
    for c in closeness_to_percent:
        closeness_to_percent[c] = closeness_to_percent[c] / N * 100

    x = sorted(closeness_to_percent.keys(), reverse = True)
    y = [closeness_to_percent[i] for i in x]

    plt.loglog(x, y, 'b-', marker = '.')
    plt.title("Closeness Centrality Distribution")
    plt.ylabel("Percentage")
    plt.xlabel("Closeness value")
    plt.axis('tight')
    plt.savefig(path)
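# Hedged usage sketch: the Barabasi-Albert test graph and the output path are
# illustrative assumptions (the snippet already relies on matplotlib as plt).
plot_closeness_dist(nx.barabasi_albert_graph(200, 2, seed=42), "closeness_dist.png")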
def attack_based_max_closeness(G):
    """ Recalcuat closeness attack
    """
    n = G.number_of_nodes()
    tot_ND = [0] * (n+1)
    tot_T = [0] * (n+1)

    ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
    tot_ND[0] = ND
    tot_T[0] = 0

    # remember the first step at which the closeness of every node has dropped to zero
    Max_Closeness_Zero_T = n
    for i in range(1, n+1):
        all_closeness = nx.closeness_centrality(G)
        if Max_Closeness_Zero_T == n and all(v == 0.0 for v in all_closeness.values()):
            Max_Closeness_Zero_T = i - 1
        # get node with max closeness
        node = max(all_closeness, key=all_closeness.get)
        
        # remove all the edges adjacent to node
        if not nx.is_directed(G):   # undirected graph
            for key in G[node].keys():
                G.remove_edge(node, key)
        else:   # directed graph
            for x in [v for u, v in G.out_edges_iter(node)]:
                G.remove_edge(node, x)
            for x in [u for u, v in G.in_edges_iter(node)]:
                G.remove_edge(x, node)
        # calculate driver node number ND
        ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
        tot_ND[i] = ND
        tot_T[i]  = i
    return (tot_ND, tot_T, Max_Closeness_Zero_T)
Example #29
def nodal_summaryOut(G, n_nodes):
    """ Compute statistics for individual nodes

    Parameters
    ----------
    G: graph data output from mkgraph
    n_nodes: number of nodes in graph.

    Returns
    -------

    A dict with: lp, clust, b_cen, c_cen, nod_eff, loc_eff, degree."""
    
    lp = nodal_pathlengths(G,n_nodes) #can't use the regular one, because it substitutes [] for disconnected nodes
    clust = np.array(nx.clustering(G).values())
    b_cen = np.array(nx.betweenness_centrality(G).values())
    c_cen = np.array(nx.closeness_centrality(G).values())
    nod_eff=nodal_efficiency(G)
    loc_eff=local_efficiency(G)
    deg = G.degree().values()

    return dict(lp=lp, clust=clust, b_cen=b_cen, c_cen=c_cen, nod_eff=nod_eff,
                loc_eff=loc_eff,deg=deg)
def centrality(net):
    values ={}
    close = nx.closeness_centrality(net, normalized= True)
    eigen = nx.eigenvector_centrality_numpy(net)
    page = nx.pagerank(net)
    bet = nx.betweenness_centrality(net,normalized= True)
    flow_c = nx.current_flow_closeness_centrality(net,normalized= True)
    flow_b = nx.current_flow_betweenness_centrality(net,normalized= True)
    load = nx.load_centrality(net, normalized = True)
    com_c = nx.communicability_centrality(net)
    com_b = nx.communicability_betweenness_centrality(net, normalized= True)
    degree = net.degree()
    
    file3 = open("bl.csv",'w')
    for xt in [bet,load,degree,page,flow_b,com_c,com_b,eigen,close,flow_c]:#[impo,bet,flow_b,load,com_c,com_b] :
        for yt in [bet,load,degree,page,flow_b,com_c,com_b,eigen,close,flow_c]:#[impo,bet,flow_b,load,com_c,com_b] :
            corr(xt.values(),yt.values(),file3)
        print
        file3.write("\n")
    file3.close()
    #plt.plot(x,y, 'o')
    #plt.plot(x, m*x + c, 'r', label='Fitted line')
    #plt.show()
    #for key,item in close.iteritems() :
        #values[key] = [impo.get(key),bet.get(key),flow_b.get(key), load.get(key),com_c.get(key),com_b.get(key)]
        
    return values
Example #31
 def print_network_characteristics(self):
     time.sleep(time_to_analyze_network)
     print("degree_centrality: ", nx.degree_centrality(self.G))
     print("eigenvector_centrality: ", nx.eigenvector_centrality(self.G))
     print("closeness_centrality: ", nx.closeness_centrality(self.G))
     print("pagerank: ", nx.pagerank(self.G))
Example #32
    secondname.append(tere[9 - kk][1])

index = np.arange(len(firstname))
index = 9 - index
plt.bar(index, secondname)
plt.xlabel('Character', fontsize=10)
plt.ylabel('Degree Centrality', fontsize=10)
plt.xticks(index, firstname, fontsize=10, rotation=30)
plt.title('Bar Graph representing Degree Centrality of Top 10 Characters.')
plt.show()

#____________________________________________________________

print("\n\n __________________________________________________________\n")
print("\nCloseness Centrality:")
closeness_centrality = nx.closeness_centrality(G)

close_listt = []

for x in closeness_centrality:
    #print(x, "-->", degree_centrality[x])
    close_listt.append([x, round(closeness_centrality[x], 4)])

close_mat_vals = np.vstack(close_listt)
#mat_vals[0]=int(mat_vals[0])
#mat_vals.sort()
close_tere = sorted(close_mat_vals, key=lambda x: (x[1]), reverse=True)
#print(close_tere)
#print("___________________________")
#print(sorted(mat_vals[0][0],key=lambda x: (x[1]), reverse=True))
Example #33
# https://xkcd.com/353/

import networkx as nx
import matplotlib.pyplot as pl

# Load the Karate (pronounced Care-Ra-Tae) data
karateGraph = nx.read_gml('data/karate.gml')

# Calculate the degree, eigenvector, closeness, and betweenness centrality for each node
degCen = nx.degree_centrality(karateGraph)
eigCen = nx.eigenvector_centrality(karateGraph)
clsCen = nx.closeness_centrality(karateGraph)
betCen = nx.betweenness_centrality(karateGraph)

# Print it all to console
print('Degree centrality per node:')
for i in degCen:
    print(i, ':', degCen[i])
print('\nEigenvector centrality per node:')
for i in eigCen:
    print(i, ':', eigCen[i])
print('\nCloseness centrality per node:')
for i in clsCen:
    print(i, ':', clsCen[i])
print('\nBetweenness centrality per node:')
for i in betCen:
    print(i, ':', betCen[i])
Example #34
def four_main_characters(movie1, G1, movie2, G2):
    G1_direct_weighted = G1[0]
    G1_direct_no_weights = G1[1]
    G1_undircet_weighted = G1[2]
    G1_undircet_no_weights = G1[3]
    #
    G2_direct_weighted = G2[0]
    G2_direct_no_weights = G2[1]
    G2_undircet_weighted = G2[2]
    G2_undircet_no_weights = G2[3]

    # Part C
    print(movie1, ' Nodes:\n', G1_direct_weighted.number_of_nodes(), '\n')
    print(movie1, ' Edges:\n', G1_direct_weighted.number_of_edges(), '\n')
    print(movie2, ' Nodes:\n', G2_direct_weighted.number_of_nodes(), '\n')
    print(movie2, ' Edges:\n', G2_direct_weighted.number_of_edges(), '\n')

    # Part D         - First Movie -
    # page Rank - made in b                 #1
    # katz - works on directed only         #2
    print(movie1, 'Direct_no_weights - Katz:\n', (sorted(
        (nx.katz_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, 'Direct_weighted - Katz:\n', (sorted(
        (nx.katz_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')

    # DegreeCentrality                      #3
    print(movie1, '- Direct_no_weights - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Direct_weighted - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_no_weights - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G1_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_weighted - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G1_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # ClosenessCentrality                   #4
    print(movie1, '- Direct_no_weights - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Direct_weighted - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_no_weights - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G1_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_weighted - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G1_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # Load Centrality                         #5
    print(movie1, '- Direct_no_weights - Load Centrality:\n', (sorted(
        (nx.load_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Direct_weighted - Load Centrality:\n', (sorted(
        (nx.load_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_no_weights - Load Centrality:\n', (sorted(
        (nx.load_centrality(G1_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- Undircet_weighted - Load Centrality:\n', (sorted(
        (nx.load_centrality(G1_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    #eigenvector_centrality
    print(movie1, '- dircet_weighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- dircet_unweighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- undircet_weighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G1_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- undircet_unweighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G1_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # BetweennessCentrality
    print(movie1, '- dircet_weighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G1_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- dircet_unweighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G1_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- undircet_weighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G1_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie1, '- undircet_unweighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G1_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')

    #           -   Second Movie    -
    # page Rank - made in b                 #1
    # katz - works on directed only         #2
    #print(movie2, 'Direct_no_weights - Katz:\n', (sorted((nx.katz_centrality(G2_direct_no_weights).items()), key=lambda kv: (kv[1], kv[0]), reverse=True)), '\n')
    #print(movie2, 'Direct_weighted - Katz:\n', (sorted((nx.katz_centrality(G2_undircet_weighted).items()), key=lambda kv: (kv[1], kv[0]), reverse=True)), '\n')
    # DegreeCentrality                      #3
    print(movie2, '- Direct_no_weights - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G2_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Direct_weighted - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G2_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_no_weights - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G2_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_weighted - degree_centrality:\n', (sorted(
        (nx.degree_centrality(G2_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # ClosenessCentrality                   #4
    print(movie2, '- Direct_no_weights - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G2_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Direct_weighted - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G2_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_no_weights - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G2_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_weighted - Closeness Centrality:\n', (sorted(
        (nx.closeness_centrality(G2_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # Load Centrality                         #5
    print(movie2, '- Direct_no_weights - Load Centrality:\n', (sorted(
        (nx.load_centrality(G2_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Direct_weighted - Load Centrality:\n', (sorted(
        (nx.load_centrality(G2_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_no_weights - Load Centrality:\n', (sorted(
        (nx.load_centrality(G2_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- Undircet_weighted - Load Centrality:\n', (sorted(
        (nx.load_centrality(G2_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # eigenvector_centrality
    print(movie2, '- dircet_weighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G2_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- dircet_unweighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G2_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- undircet_weighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G2_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- undircet_unweighted - eigenvector_centrality:\n', (sorted(
        (nx.eigenvector_centrality(G2_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    # BetweennessCentrality
    print(movie2, '- dircet_weighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G2_direct_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- dircet_unweighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G2_direct_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- undircet_weighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G2_undircet_weighted).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
    print(movie2, '- undircet_unweighted - betweenness_centrality:\n', (sorted(
        (nx.betweenness_centrality(G2_undircet_no_weights).items()),
        key=lambda kv: (kv[1], kv[0]),
        reverse=True)), '\n')
Example #35
nx.bfs_tree(g, 0).edges()

# Connectivity
nx.is_connected(g)
sorted(nx.connected_components(g))
nx.node_connectivity(g)
nx.minimum_node_cut(g)
nx.edge_connectivity(g)
nx.minimum_edge_cut(g)
nx.minimum_node_cut(g, 4, 0)
nx.minimum_edge_cut(g, 4, 0)

# Centrality
nx.degree_centrality(g)  # degree / (number of nodes - 1)
nx.closeness_centrality(g)  # (number of nodes - 1) / sum of shortest path lengths
nx.betweenness_centrality(
    g, normalized=True, endpoints=False,
    k=5)  # sum of (# of shortest path through the node/# of shortest paths)
nx.edge_betweenness_centrality(
    g, normalized=True
)  # sum of (# of shortest path through the edge/# of shortest paths)

# Karate Club Data
k = nx.karate_club_graph()
nx.draw_networkx(k, pos=nx.random_layout(k))
nx.draw_networkx(k, pos=nx.circular_layout(k))
nx.draw_networkx(k, pos=nx.circular_layout(k), edge_color='0.4', alpha=0.1)

# Page Rank
# Assuming equal share at the beginning and recalculate based on the share received till convergence
Example #36
def closeness(graph):
    return nx.closeness_centrality(graph)
Example #37
def incremental_closeness_centrality(G,
                                     edge,
                                     prev_cc=None,
                                     insertion=True,
                                     wf_improved=True):
    r"""Incremental closeness centrality for nodes.

    Compute closeness centrality for nodes using level-based work filtering
    as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al.

    Level-based work filtering detects unnecessary updates to the closeness
    centrality and filters them out.

    ---
    From "Incremental Algorithms for Closeness Centrality":

    Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V
    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`
    Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.

    Where :math:`dG(u, v)` denotes the length of the shortest path between
    two vertices u, v in a graph G, cc[s] is the closeness centrality for a
    vertex s in V, and cc'[s] is the closeness centrality for a
    vertex s in V, with the (u, v) edge added.
    ---

    We use Theorem 1 to filter out updates when adding or removing an edge.
    When adding an edge (u, v), we compute the shortest path lengths from all
    other nodes to u and to v before the node is added. When removing an edge,
    we compute the shortest path lengths after the edge is removed. Then we
    apply Theorem 1 to use previously computed closeness centrality for nodes
    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for
    undirected, unweighted graphs; the distance argument is not supported.

    Closeness centrality [1]_ of a node `u` is the reciprocal of the
    sum of the shortest path distances from `u` to all `n-1` other nodes.
    Since the sum of distances depends on the number of nodes in the
    graph, closeness is normalized by the sum of minimum possible
    distances `n-1`.

    .. math::

        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

    where `d(v, u)` is the shortest-path distance between `v` and `u`,
    and `n` is the number of nodes in the graph.

    Notice that higher values of closeness indicate higher centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    edge : tuple
      The modified edge (u, v) in the graph.

    prev_cc : dictionary
      The previous closeness centrality for all nodes in the graph.

    insertion : bool, optional
      If True (default) the edge was inserted, otherwise it was deleted from the graph.

    wf_improved : bool, optional (default=True)
      If True, scale by the fraction of nodes reachable. This gives the
      Wasserman and Faust improved formula. For single component graphs
      it is the same as the original formula.

    Returns
    -------
    nodes : dictionary
      Dictionary of nodes with closeness centrality as the value.

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality,
    degree_centrality, closeness_centrality

    Notes
    -----
    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
    `n` is the number of nodes in the connected part of graph
    containing the node.  If the graph is not completely connected,
    this algorithm computes the closeness centrality for each
    connected part separately.

    References
    ----------
    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
       Conceptual clarification.  Social Networks 1, 215--239.
       https://doi.org/10.1016/0378-8733(78)90021-7
    .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V. Incremental
       Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data
       http://sariyuce.com/papers/bigdata13.pdf
    """
    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
        raise NetworkXError("prev_cc and G do not have the same nodes")

    # Unpack edge
    (u, v) = edge
    path_length = nx.single_source_shortest_path_length

    if insertion:
        # For edge insertion, we want shortest paths before the edge is inserted
        du = path_length(G, u)
        dv = path_length(G, v)

        G.add_edge(u, v)
    else:
        G.remove_edge(u, v)

        # For edge removal, we want shortest paths after the edge is removed
        du = path_length(G, u)
        dv = path_length(G, v)

    if prev_cc is None:
        return nx.closeness_centrality(G)

    nodes = G.nodes()
    closeness_centrality = {}
    for n in nodes:
        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
            closeness_centrality[n] = prev_cc[n]
        else:
            sp = path_length(G, n)
            totsp = sum(sp.values())
            len_G = len(G)
            _closeness_centrality = 0.0
            if totsp > 0.0 and len_G > 1:
                _closeness_centrality = (len(sp) - 1.0) / totsp
                # normalize to number of nodes-1 in connected part
                if wf_improved:
                    s = (len(sp) - 1.0) / (len_G - 1)
                    _closeness_centrality *= s
            closeness_centrality[n] = _closeness_centrality

    # Leave the graph as we found it
    if insertion:
        G.remove_edge(u, v)
    else:
        G.add_edge(u, v)

    return closeness_centrality
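# Hedged usage sketch: maintain closeness scores while inserting edges one at a
# time; the random test graph and the candidate edges are illustrative choices.
G_demo = nx.gnm_random_graph(50, 120, seed=1)
cc_demo = nx.closeness_centrality(G_demo)
for new_edge in [(0, 17), (3, 42)]:
    if not G_demo.has_edge(*new_edge):
        # the helper restores G_demo before returning, so apply the edge afterwards
        cc_demo = incremental_closeness_centrality(G_demo, new_edge, prev_cc=cc_demo)
        G_demo.add_edge(*new_edge)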
Example #38
def make_table_analysis_players_team(G, team_data):
    """

    :param G: graph
    :param team_data: dic[ pos] = Player
    :return:
    """

    # Goals and fails ARE counted in the data used for these calculations

    r = 5

    degree = G.degree
    degree_centrality = nx.degree_centrality(G)
    betweenness_centrality = nx.betweenness_centrality(G, weight='weight')
    closeness_centrality = nx.closeness_centrality(G, distance='weight')
    eigenvector_centrality = nx.eigenvector_centrality(G, weight='weight')
    #katz_centrality = nx.katz_centrality(G, tol= 0.05,weight='weight')
    pagerank_centrality = nx.pagerank(G, weight='weight')
    clustering_data = nx.clustering(G, weight='weight')

    metric_def = metric_defensive(G)
    set_metric = []

    print(
        "------------------------------*******TABLA******----------------------------"
    )
    print(
        "TEAM" + "&",
        "POS" + "&",
        "NAME" + "&",
        "$\delta$" + "&",
        "$\delta_{c}$" + "&",
        "$\delta_{bet}$" + "&",
        "$Clos$" + "&",
        "$E_c$" + "&",
        #"$Katz_c$" + "&",
        "$PR$" + "&",
        "Clus" + "&",
        "M_{df}" + "\\")
    for p_pos in team_data:
        player = team_data[p_pos]
        player_name = player.name
        player_pos = player.pos
        player_degree = str(round(degree[player_pos], r)) + "&"
        player_degree_c = str(round(degree_centrality[player_pos], r)) + "&"
        player_betwee_c = str(round(betweenness_centrality[player_pos],
                                    r)) + "&"
        player_closeness_c = str(round(closeness_centrality[player_pos],
                                       r)) + "&"
        player_eigen_c = str(round(eigenvector_centrality[player_pos],
                                   r)) + "&"
        #player_katz_c = str( round(katz_centrality[player_pos],r) ) + "&"
        player_pagerank = str(round(pagerank_centrality[player_pos], r)) + "&"
        player_clustering = str(round(clustering_data[player_pos], r)) + "&"
        player_df = str(metric_def[player_pos]) + "\\" + "\\"
        print(
            player.name_team + "&",
            player_pos + "&",
            player_name + "&",
            player_degree,
            player_degree_c,
            player_betwee_c,
            player_closeness_c,
            player_eigen_c,
            #player_katz_c,
            player_pagerank,
            player_clustering,
            player_df)
        set_metric.append(metric_def[player_pos])
    return set_metric
Example #39
 def test_p3_closeness(self):
     c = nx.closeness_centrality(self.P3)
     d = {0: 0.667, 1: 1.000, 2: 0.667}
     for n in sorted(self.P3):
         assert_almost_equal(c[n], d[n], places=3)
Example #40
 def test_k5_closeness(self):
     c = nx.closeness_centrality(self.K5)
     d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000}
     for n in sorted(self.K5):
         assert_almost_equal(c[n], d[n], places=3)
def main_dirG():
    #Directed Graph
    G = nx.DiGraph(weight=0)
    for t in range(len(sent_tokens)):
        word_tokens = RegexpTokenizer(r'\w+').tokenize(sent_tokens[t])
        #capital to lower case
        word_tokens = [ele.lower() for ele in word_tokens]
        #print(word_tokens) # test sentence
        G.add_nodes_from(word_tokens)
        #graph generation for each sentence's words

        #G = nx.complete_graph(len(word_tokens)) #all pairs of nodes get edges

        ######## edge connection: different approaches - choose one!
        #n1=n2=1
        #G = adjOne(G,word_tokens)
        #n1=n1=2
        #G = adjTwo(G,word_tokens)
        #all connected
        G = dir_completeG(G, word_tokens)

    ###########
    #print(len(G.nodes()))
    #print(G.edges())
    #degHistogram(G)
    ###########

    #Eigenvector Centrality
    eigen_central = nx.eigenvector_centrality(G)

    #print('eigenvector centrality: ',eigen_central)

    eigen_key_max = max(eigen_central.keys(), key=(lambda k: eigen_central[k]))
    eigen_key_min = min(eigen_central.keys(), key=(lambda k: eigen_central[k]))
    print('max eigen centrality: ', eigen_key_max,
          eigen_central[eigen_key_max])
    print('min eigen centrality: ', eigen_key_min,
          eigen_central[eigen_key_min])

    with open('eigenvector_centrality.txt', 'w+') as file:
        file.write(json.dumps(eigen_central))
    file.close()

    #############################################################################
    #Katz Centrality ; https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.centrality.katz_centrality.html#networkx.algorithms.centrality.katz_centrality
    katz_central = nx.katz_centrality(
        G, alpha=0.001, max_iter=20000
    )  #default alpha = 0.1 -> not converge! so, used 0.01 / for complete graph: alpha=0.001
    #print(katz_central)

    katz_key_max = max(katz_central.keys(), key=(lambda k: katz_central[k]))
    katz_key_min = min(katz_central.keys(), key=(lambda k: katz_central[k]))
    print('max katz centrality: ', katz_key_max, katz_central[katz_key_max])
    print('min katz centrality: ', katz_key_min, katz_central[katz_key_min])

    with open('katz_centrality.txt', 'w+') as file:
        file.write(json.dumps(katz_central))
    file.close()

    #############################################################################

    #Page Rank
    page_rank = nx.pagerank(G)
    #print(page_rank)

    pr_key_max = max(page_rank.keys(), key=(lambda k: page_rank[k]))
    pr_key_min = min(page_rank.keys(), key=(lambda k: page_rank[k]))
    print('max page rank: ', pr_key_max, page_rank[pr_key_max])
    print('min page rank: ', pr_key_min, page_rank[pr_key_min])

    with open('pagerank.txt', 'w+') as file:
        file.write(json.dumps(page_rank))
    file.close()

    #############################################################################

    #Closeness Centrality
    closeness_central = nx.closeness_centrality(G)
    #print(closeness_central)

    close_key_max = max(closeness_central.keys(),
                        key=(lambda k: closeness_central[k]))
    close_key_min = min(closeness_central.keys(),
                        key=(lambda k: closeness_central[k]))
    print('max closeness centrality: ', close_key_max,
          closeness_central[close_key_max])
    print('min closeness centrality: ', close_key_min,
          closeness_central[close_key_min])

    with open('closeness_centrality.txt', 'w+') as file:
        file.write(json.dumps(closeness_central))
    file.close()

    #############################################################################
    #Betweenness Centrality
    betweenness_central = nx.betweenness_centrality(G)
    #print(betweenness_central)
    bc_key_max = max(betweenness_central.keys(),
                     key=(lambda k: betweenness_central[k]))
    bc_key_min = min(betweenness_central.keys(),
                     key=(lambda k: betweenness_central[k]))
    print('max betweenness centrality: ', bc_key_max,
          betweenness_central[bc_key_max])
    print('min betweenness centrality: ', bc_key_min,
          betweenness_central[bc_key_min])

    with open('betweenness_centrality.txt', 'w+') as file:
        file.write(json.dumps(betweenness_central))
    file.close()
    '''
Example #42
                                   name='degree_centrality',
                                   values=dict_degree_centrality)

            print("Calculating: eigevector centrality")
            dict_eigenvector_centrality = nx.eigenvector_centrality(
                Gtc, weight='weight')
            nx.set_node_attributes(Gtc,
                                   name='eigenvector_centrality',
                                   values=dict_eigenvector_centrality)

            #print('Calculating: katz centrality')
            #dict_katz_centrality = nx.katz_centrality(Gt, weight='weight')
            #nx.set_node_attributes(Gt, name='katz_centrality', values=dict_katz_centrality)

            print('Calculating: closeness centrality')
            dict_closeness_centrality = nx.closeness_centrality(
                Gtc, distance='distance')
            nx.set_node_attributes(Gtc,
                                   name='closeness_centrality',
                                   values=dict_closeness_centrality)

            print(
                'Calculating: betweenness centrality (this may take a while)')
            dict_betweenness_centrality = nx.betweenness_centrality(Gtc)
            nx.set_node_attributes(Gtc,
                                   name='betweenness_centrality',
                                   values=dict_betweenness_centrality)

            print('Calculating: page rank')
            dict_page_rank = nx.pagerank(Gtc)
            nx.set_node_attributes(Gtc, name='pagerank', values=dict_page_rank)

            print('Calculating: clustering (this may take a while)')
            dict_clustering = nx.clustering(Gtc, weight='weight')
            nx.set_node_attributes(Gtc,
Example #43
def get_map_map(tab, dataset_selection, traveler, centrality_index, label_size,
                node_size):

    map_graph = diogenetGraph("map", dataset_selection, dataset_selection,
                              'locations_data.csv', 'travels_blacklist.csv')

    if tab == "map_maps":
        data = None
        mapa = None

        map_graph.current_centrality_index = centrality_index
        if traveler == "All" or traveler == []:
            map_graph.edges_filter = []
        else:
            for m_filter in traveler:
                map_graph.set_edges_filter(m_filter)
            map_graph.create_subgraph()

        data = map_graph.get_map_data(min_weight=node_size[0],
                                      max_weight=node_size[1])
        df = pd.DataFrame(data)

        #Folium base map configurations
        url = 'https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}'
        attribution = '&copy; <a href="https://developers.arcgis.com/">ArcGIS</a> '

        base_map = folium.Map(location=[35, 30],
                              min_zoom=4,
                              max_zoom=7,
                              zoom_start=6,
                              tiles=None,
                              attr=attribution)

        folium.TileLayer(
            tiles=url,
            show=True,
            attr=attribution,
            min_zoom=3,
            max_zoom=8,
            name="USGS - The National Map",
        ).add_to(base_map)

        markers_source = folium.FeatureGroup(name='Source').add_to(base_map)
        markers_target = folium.FeatureGroup(name='Target').add_to(base_map)

        for i in range(len(df['Source'])):
            #source marker
            popup_source = folium.Popup(str(
                "{} \n (lat = {:.1f}, \n lon={:.1f})".format(
                    df['Source'][i], df["SourceLatitude"][i],
                    df["SourceLongitude"][i])),
                                        parse_html=True,
                                        max_width=450)
            tooltip_source = "{} (lat = {:.1f}, lon={:.1f})".format(
                df['Source'][i], df["SourceLatitude"][i],
                df["SourceLongitude"][i])

            markers_source.add_child(
                folium.CircleMarker(location=(float(df["SourceLatitude"][i]),
                                              float(df["SourceLongitude"][i])),
                                    popup=popup_source,
                                    tooltip=tooltip_source,
                                    fill=True,
                                    color=df["SourceColor"][i],
                                    fill_color=df["SourceColor"][i],
                                    radius=int(df["SourceSize"][i] * 1.3)))

            #target marker
            popup_target = folium.Popup(str(
                "{} \n (lat = {:.1f}, \n lon={:.1f})".format(
                    df['Destination'][i], df["DestLatitude"][i],
                    df["DestLongitude"][i])),
                                        parse_html=True,
                                        max_width=450)
            tooltip_target = "{} (lat = {:.1f}, lon={:.1f})".format(
                df['Destination'][i], df["DestLatitude"][i],
                df["DestLongitude"][i])

            markers_target.add_child(
                folium.CircleMarker(location=(float(df["DestLatitude"][i]),
                                              float(df["DestLongitude"][i])),
                                    popup=popup_target,
                                    tooltip=tooltip_target,
                                    fill=True,
                                    color=df["DestinationColor"][i],
                                    fill_color=df["DestinationColor"][i],
                                    radius=int(df["DestinationSize"][i] *
                                               1.3)))

            # travel info line
            folium.PolyLine(
                [[df["SourceLatitude"][i], df["SourceLongitude"][i]],
                 [df["DestLatitude"][i], df["DestLongitude"][i]]],
                popup=folium.Popup(str("{} travel from {} to  {}".format(
                    df['Philosopher'][i], df["Source"][i],
                    df["Destination"][i])),
                                   parse_html=True,
                                   max_width=450),
                tooltip="{} travel from {} to  {}".format(
                    df['Philosopher'][i], df["Source"][i],
                    df["Destination"][i]),
                color='#ced4da',
                weight=1.5,
            ).add_to(base_map)

        suffix = ".html"
        temp_file_name = next(tempfile._get_candidate_names()) + suffix
        full_filename = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '.', 'assets',
                         temp_file_name))

        #saving folium .html file
        folium.LayerControl().add_to(base_map)
        base_map.save(full_filename)
        return [
            html.Iframe(src=app.get_asset_url(f'{temp_file_name}'),
                        style={
                            "height": "100%",
                            "width": "100%"
                        })
        ]

    if tab == "map_metrics":

        if traveler == "All":
            all_travelers = sorted(list(set(map_graph.get_edges_names())))
            map_graph.edges_filter = []
            for m_filter in all_travelers:
                map_graph.set_edges_filter(m_filter)
            map_graph.create_subgraph()
        elif traveler == []:
            all_travelers = sorted(list(set(map_graph.get_edges_names())))
            map_graph.edges_filter = []
            for m_filter in all_travelers:
                map_graph.set_edges_filter(m_filter)
            map_graph.create_subgraph()
        else:
            map_graph.edges_filter = []
            for m_filter in traveler:
                map_graph.set_edges_filter(m_filter)
            map_graph.create_subgraph()

        def round_list_values(list_in):
            return [round(value, 4) for value in list_in]

        calculated_network_betweenness = list(
            pd.DataFrame.from_dict(
                nx.betweenness_centrality(
                    map_graph.networkx_subgraph).items())[1])
        calculated_network_degree = list(
            pd.DataFrame.from_dict(
                nx.degree_centrality(map_graph.networkx_subgraph).items())[1])
        calculated_network_closeness = list(
            pd.DataFrame.from_dict(
                nx.closeness_centrality(
                    map_graph.networkx_subgraph).items())[1])
        calculated_network_eigenvector = list(
            pd.DataFrame.from_dict(
                nx.eigenvector_centrality(
                    map_graph.networkx_subgraph).items())[1])

        calculated_degree = [
            round(value) for value in map_graph.calculate_degree()
        ]
        calculated_betweenness = round_list_values(
            map_graph.calculate_betweenness())
        calculated_closeness = round_list_values(
            map_graph.calculate_closeness())
        calculated_eigenvector = round_list_values(
            map_graph.calculate_eigenvector())

        dict_map_data_tables = {
            "City": map_graph.get_vertex_names(),
            "Degree": round_list_values(calculated_network_degree),
            "Betweenness": round_list_values(calculated_network_betweenness),
            "Closeness": round_list_values(calculated_network_closeness),
            "Eigenvector": round_list_values(calculated_network_eigenvector),
        }

        df_map_data_tables = pd.DataFrame(dict_map_data_tables)

        dt_map = dash_table.DataTable(id='table-map',
                                      columns=[{
                                          "name": i,
                                          "id": i,
                                          'deletable': True
                                      } for i in df_map_data_tables.columns],
                                      style_data_conditional=[{
                                          'if': {
                                              'row_index': 'odd'
                                          },
                                          'backgroundColor':
                                          'rgb(220, 220, 220)',
                                      }],
                                      style_cell={'textAlign': 'center'},
                                      style_header={'textAlign': 'center'},
                                      page_current=0,
                                      page_size=20,
                                      page_action='custom',
                                      sort_mode='single',
                                      sort_by=[{
                                          'column_id': 'Degree',
                                          'direction': 'asc'
                                      }])

        foot_note = html.Div(children=[
            html.Span('Metrics obtained using the algorithms of '),
            html.A('Networkx',
                   href='https://networkx.org/documentation/stable/',
                   target='_blank')
        ])
        return [
            html.H6('Centrality Scores', className="mt-1 mb-2"),
            html.Hr(className='py-0'), dt_map, foot_note
        ]

    if tab == "map_graphs":

        map_graph.current_centrality_index = centrality_index

        graph_layout = "fr"
        pvis_graph = None

        if traveler == "All" or traveler == []:
            #map_graph.edges_filter = []
            pvis_graph = map_graph.get_pyvis(
                min_weight=node_size[0],
                max_weight=node_size[1],
                min_label_size=label_size[0],
                max_label_size=label_size[1],
                layout=graph_layout,
            )
        else:
            for m_filter in traveler:
                map_graph.set_edges_filter(m_filter)
            map_graph.create_subgraph()
            pvis_graph = map_graph.get_pyvis()

        if pvis_graph:
            suffix = ".html"
            temp_file_name = next(tempfile._get_candidate_names()) + suffix
            full_filename = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '.', 'assets',
                             temp_file_name))

            pvis_graph.write_html(full_filename)
            return [
                html.Iframe(src=app.get_asset_url(f'{temp_file_name}'),
                            style={
                                "height": "100%",
                                "width": "100%"
                            })
            ]
Example #44
    def average_closeness_centrality(self):
        closeness = nx.closeness_centrality(self.graph)
        closeness = np.fromiter(closeness.values(), dtype=float)
        return np.mean(closeness)
    def get_net_features(self):
        num_nodes = nx.number_of_nodes(self._G)  # number of nodes
        num_edges = nx.number_of_edges(self._G)  # number of edges
        density = nx.density(self._G)  # density
        clustering_coefficient = nx.average_clustering(self._G)  # average / local clustering coefficient
        transitivity = nx.transitivity(self._G)  # transitivity / global clustering coefficient
        reciprocity = nx.reciprocity(self._G)  # reciprocity

        print('number of nodes: ', num_nodes)
        print('number of edges: ', num_edges)
        print('density: ', density)
        print('local clustering coefficient: ', clustering_coefficient)
        print('global clustering coefficient: ', transitivity)
        print('reciprocity: ', reciprocity)
        # centrality computation
        out_degree = nx.out_degree_centrality(self._G)  # out-degree centrality
        in_degree = nx.in_degree_centrality(self._G)  # in-degree centrality
        out_closeness = nx.closeness_centrality(self._G.reverse())  # out-closeness centrality
        in_closeness = nx.closeness_centrality(self._G)  # in-closeness centrality
        betweenness = nx.betweenness_centrality(self._G)  # betweenness centrality

        print('out-degree centrality: ', out_degree)
        print('in-degree centrality: ', in_degree)
        print('out-closeness centrality: ', out_closeness)
        print('in-closeness centrality: ', in_closeness)
        print('betweenness centrality: ', betweenness)
        # centralization computation
        # networkx does not seem to offer a direct way to compute centralization,
        # so we compute it ourselves from the formula below.
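        # Added note (not in the original snippet): Freeman's general centralization
        # index is C = sum_i (c_max - c_i) / (maximum possible value of that sum),
        # where c_i are the node centralities. The num_nodes * max_ - s terms below
        # compute the numerator; the denominators are the original author's choices
        # for the directed degree, closeness, and betweenness cases.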
        max_ = 0
        s = 0
        for out in out_degree.keys():
            if out_degree[out] > max_: max_ = out_degree[out]
            s = s + out_degree[out]
        print('out-degree centralization:', (num_nodes * max_ - s) / (num_nodes - 2))

        max_ = 0
        s = 0
        for in_ in in_degree.keys():
            if in_degree[in_] > max_: max_ = in_degree[in_]
            s = s + in_degree[in_]
        print('in-degree centralization:', (num_nodes * max_ - s) / (num_nodes - 2))

        max_ = 0
        s = 0
        for b in out_closeness.keys():
            if (out_closeness[b] > max_): max_ = out_closeness[b]
            s = s + out_closeness[b]
        print('out-closeness centralization:', (num_nodes * max_ - s) / (num_nodes - 1) / (num_nodes - 2) * (2 * num_nodes - 3))

        max_ = 0
        s = 0
        for b in in_closeness.keys():
            if (in_closeness[b] > max_): max_ = in_closeness[b]
            s = s + in_closeness[b]
        print('in-closeness centralization:', (num_nodes * max_ - s) / (num_nodes - 1) / (num_nodes - 2) * (2 * num_nodes - 3))

        max_ = 0
        s = 0
        for b in betweenness.keys():
            if (betweenness[b] > max_): max_ = betweenness[b]
            s = s + betweenness[b]
        print('betweenness centralization:', (num_nodes * max_ - s) / (num_nodes - 1))
Example #46
def update_table(
    page_current,
    page_size,
    sort_by,
    dataset_selection,
    traveler,
):
    map_graph = diogenetGraph("map", dataset_selection, dataset_selection,
                              'locations_data.csv', 'travels_blacklist.csv')

    if traveler == "All":
        all_travelers = sorted(list(set(map_graph.get_edges_names())))
        map_graph.edges_filter = []
        for m_filter in all_travelers:
            map_graph.set_edges_filter(m_filter)
        map_graph.create_subgraph()
    elif traveler == []:
        all_travelers = sorted(list(set(map_graph.get_edges_names())))
        map_graph.edges_filter = []
        for m_filter in all_travelers:
            map_graph.set_edges_filter(m_filter)
        map_graph.create_subgraph()
    else:
        map_graph.edges_filter = []
        for m_filter in traveler:
            map_graph.set_edges_filter(m_filter)
        map_graph.create_subgraph()

    def round_list_values(list_in):
        return [round(value, 4) for value in list_in]

    #Networkx Metrics
    calculated_networkx_betweenness = list(
        pd.DataFrame.from_dict(
            nx.betweenness_centrality(map_graph.networkx_subgraph).items())[1])
    calculated_networkx_degree = list(
        pd.DataFrame.from_dict(
            nx.degree_centrality(map_graph.networkx_subgraph).items())[1])
    calculated_networkx_closeness = list(
        pd.DataFrame.from_dict(
            nx.closeness_centrality(map_graph.networkx_subgraph).items())[1])
    calculated_networkx_eigenvector = list(
        pd.DataFrame.from_dict(
            nx.eigenvector_centrality(map_graph.networkx_subgraph).items())[1])

    # igraph Metrics
    calculated_degree = [
        round(value) for value in map_graph.calculate_degree()
    ]
    calculated_betweenness = round_list_values(
        map_graph.calculate_betweenness())
    calculated_closeness = round_list_values(map_graph.calculate_closeness())
    calculated_eigenvector = round_list_values(
        map_graph.calculate_eigenvector())

    dict_map_data_tables = {
        "City": map_graph.get_vertex_names(),
        "Degree": round_list_values(calculated_networkx_degree),
        "Betweenness": round_list_values(calculated_networkx_betweenness),
        "Closeness": round_list_values(calculated_networkx_closeness),
        "Eigenvector": round_list_values(calculated_networkx_eigenvector),
    }

    df_map_data_tables = pd.DataFrame(dict_map_data_tables)

    #print(sort_by)
    if len(sort_by):
        dff = df_map_data_tables.sort_values(
            sort_by[0]['column_id'],
            ascending=sort_by[0]['direction'] == 'desc',
            inplace=False)
    else:
        # No sort is applied
        dff = df_map_data_tables

    return dff.iloc[page_current * page_size:(page_current + 1) *
                    page_size].to_dict('records')
Example #47
def Closeness_Centrality(G):
    Closeness_Centrality = nx.closeness_centrality(G)
    #print "Closeness_Centrality:", sorted(Closeness_Centrality.iteritems(), key=lambda d:d[1], reverse = True)
    return Closeness_Centrality
Example #48
if needs_eig:
    print "[+] Computing eigenvector centrality..."
    eig = pd.Series(nx.eigenvector_centrality_numpy(graph),
                    name='eigenvector_centrality')

if needs_clu:
    print "[+] Computing clustering coefficient..."
    clu = pd.Series(nx.clustering(graph), name='clustering_coefficient')

if needs_tri:
    print "[+] Computing number of triangles..."
    tri = pd.Series(nx.triangles(graph), name='triangles')

if needs_clo:
    print "[+] Computing closeness centrality..."
    clo = pd.Series(nx.closeness_centrality(graph),
                    name='closeness_centrality')

if needs_pag:
    print "[+] Computing pagerank..."
    pag = pd.Series(nx.pagerank(graph), name='pagerank')

if needs_squ:
    print "[+] Computing square clustering..."
    squ = pd.Series(nx.square_clustering(graph),
                    name='square_clustering_coefficient')

# Always run: connected components
_cco = {}
for i, c in enumerate(nx.connected_components(graph)):
    for e in c:
import pandas as pd
import sys
import networkx as nx
import csv
import matplotlib.pyplot as plt
from create_graph import cr_grph

G = nx.Graph()
(G, edge_data) = cr_grph()
cc_Dict = dict(nx.closeness_centrality(G))
cc_Dict_order = dict(
    sorted(cc_Dict.items(), key=lambda kv: kv[1], reverse=True))
keys = list(cc_Dict_order.keys())
"""
for i in range(100):
      print("ranking",keys[i])
"""
with open('closeness_dict_rec.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    for key, value in cc_Dict.items():
        writer.writerow([key, value])
Example #50
result = []
i = 58
G = readNet('resultFullData.txt', 927590400 + 1209600 * i)
G = updateWeight(G, 927590400 + 1209600 * i)
#G = generateProbabilisticGraph()

#x = betweenness_centrality(G)
x = closeness_centrality(G)
sorted_x = {
    key: rank
    for rank, key in enumerate(sorted(x, key=x.get, reverse=True), 1)
}

G = changeWeight(G)
#y = nx.betweenness_centrality(G,weight = 'weight')
y = nx.closeness_centrality(G, distance='weight')
sorted_y = {
    key: rank
    for rank, key in enumerate(sorted(y, key=y.get, reverse=True), 1)
}
difference = 0
for key in sorted_x:
    plt.scatter(sorted_x[key], sorted_y[key])
    difference = abs(sorted_x[key] - sorted_y[key]) + difference
plt.show()
print difference

G = updateWeight(G, 927590400 + 1209600 * i)
#G = changeWeight(G)
result = None
for i in range(100):
Example #51
def extended_stats(G,
                   connectivity=False,
                   anc=False,
                   ecc=False,
                   bc=False,
                   cc=False):
    """
    Calculate extended topological stats and metrics for a graph.

    Many of these algorithms have an inherently high time complexity. Global
    topological analysis of large complex networks is extremely time consuming
    and may exhaust computer memory. Consider using function arguments to not
    run metrics that require computation of a full matrix of paths if they
    will not be needed.

    Parameters
    ----------
    G : networkx multidigraph
    connectivity : bool
        if True, calculate node and edge connectivity
    anc : bool
        if True, calculate average node connectivity
    ecc : bool
        if True, calculate shortest paths, eccentricity, and topological metrics
        that use eccentricity
    bc : bool
        if True, calculate node betweenness centrality
    cc : bool
        if True, calculate node closeness centrality

    Returns
    -------
    stats : dict
        dictionary of network measures containing the following elements (some
        only calculated/returned optionally, based on passed parameters):

          - avg_neighbor_degree
          - avg_neighbor_degree_avg
          - avg_weighted_neighbor_degree
          - avg_weighted_neighbor_degree_avg
          - degree_centrality
          - degree_centrality_avg
          - clustering_coefficient
          - clustering_coefficient_avg
          - clustering_coefficient_weighted
          - clustering_coefficient_weighted_avg
          - pagerank
          - pagerank_max_node
          - pagerank_max
          - pagerank_min_node
          - pagerank_min
          - node_connectivity
          - node_connectivity_avg
          - edge_connectivity
          - eccentricity
          - diameter
          - radius
          - center
          - periphery
          - closeness_centrality
          - closeness_centrality_avg
          - betweenness_centrality
          - betweenness_centrality_avg

    """

    stats = {}
    full_start_time = time.time()

    # create a DiGraph from the MultiDiGraph, for those metrics that require it
    G_dir = nx.DiGraph(G)

    # create an undirected Graph from the MultiDiGraph, for those metrics that
    # require it
    G_undir = nx.Graph(G)

    # get the largest strongly connected component, for those metrics that
    # require strongly connected graphs
    G_strong = get_largest_component(G, strongly=True)

    # average degree of the neighborhood of each node, and average for the graph
    avg_neighbor_degree = nx.average_neighbor_degree(G)
    stats['avg_neighbor_degree'] = avg_neighbor_degree
    stats['avg_neighbor_degree_avg'] = sum(
        avg_neighbor_degree.values()) / len(avg_neighbor_degree)

    # average weighted degree of the neighborhood of each node, and average for
    # the graph
    avg_weighted_neighbor_degree = nx.average_neighbor_degree(G,
                                                              weight='length')
    stats['avg_weighted_neighbor_degree'] = avg_weighted_neighbor_degree
    stats['avg_weighted_neighbor_degree_avg'] = sum(
        avg_weighted_neighbor_degree.values()) / len(
            avg_weighted_neighbor_degree)

    # degree centrality for a node is the fraction of nodes it is connected to
    degree_centrality = nx.degree_centrality(G)
    stats['degree_centrality'] = degree_centrality
    stats['degree_centrality_avg'] = sum(
        degree_centrality.values()) / len(degree_centrality)

    # calculate clustering coefficient for the nodes
    stats['clustering_coefficient'] = nx.clustering(G_undir)

    # average clustering coefficient for the graph
    stats['clustering_coefficient_avg'] = nx.average_clustering(G_undir)

    # calculate weighted clustering coefficient for the nodes
    stats['clustering_coefficient_weighted'] = nx.clustering(G_undir,
                                                             weight='length')

    # average clustering coefficient (weighted) for the graph
    stats['clustering_coefficient_weighted_avg'] = nx.average_clustering(
        G_undir, weight='length')

    # pagerank: a ranking of the nodes in the graph based on the structure of
    # the incoming links
    pagerank = nx.pagerank(G_dir, weight='length')
    stats['pagerank'] = pagerank

    # node with the highest page rank, and its value
    pagerank_max_node = max(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_max_node'] = pagerank_max_node
    stats['pagerank_max'] = pagerank[pagerank_max_node]

    # node with the lowest page rank, and its value
    pagerank_min_node = min(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_min_node'] = pagerank_min_node
    stats['pagerank_min'] = pagerank[pagerank_min_node]

    # if True, calculate node and edge connectivity
    if connectivity:
        start_time = time.time()

        # node connectivity is the minimum number of nodes that must be removed
        # to disconnect G or render it trivial
        stats['node_connectivity'] = nx.node_connectivity(G_strong)

        # edge connectivity is equal to the minimum number of edges that must be
        # removed to disconnect G or render it trivial
        stats['edge_connectivity'] = nx.edge_connectivity(G_strong)
        log('Calculated node and edge connectivity in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate average node connectivity
    if anc:
        # mean number of internally node-disjoint paths between each pair of
        # nodes in G, i.e., the expected number of nodes that must be removed to
        # disconnect a randomly selected pair of non-adjacent nodes
        start_time = time.time()
        stats['node_connectivity_avg'] = nx.average_node_connectivity(G)
        log('Calculated average node connectivity in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate shortest paths, eccentricity, and topological metrics
    # that use eccentricity
    if ecc:
        # precompute shortest paths between all nodes for eccentricity-based
        # stats
        start_time = time.time()
        sp = {
            source: dict(
                nx.single_source_dijkstra_path_length(G_strong,
                                                      source,
                                                      weight='length'))
            for source in G_strong.nodes()
        }

        log('Calculated shortest path lengths in {:,.2f} seconds'.format(
            time.time() - start_time))

        # eccentricity of a node v is the maximum distance from v to all other
        # nodes in G
        eccentricity = nx.eccentricity(G_strong, sp=sp)
        stats['eccentricity'] = eccentricity

        # diameter is the maximum eccentricity
        diameter = nx.diameter(G_strong, e=eccentricity)
        stats['diameter'] = diameter

        # radius is the minimum eccentricity
        radius = nx.radius(G_strong, e=eccentricity)
        stats['radius'] = radius

        # center is the set of nodes with eccentricity equal to radius
        center = nx.center(G_strong, e=eccentricity)
        stats['center'] = center

        # periphery is the set of nodes with eccentricity equal to the diameter
        periphery = nx.periphery(G_strong, e=eccentricity)
        stats['periphery'] = periphery

    # if True, calculate node closeness centrality
    if cc:
        # closeness centrality of a node is the reciprocal of the sum of the
        # shortest path distances from u to all other nodes
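        # Added note (an assumption about the library default): for each node u,
        # NetworkX computes C(u) = (n_reachable - 1) / (sum of 'length'-weighted
        # shortest-path distances to u), then scales by the fraction of the graph
        # that can reach u (the wf_improved default).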
        start_time = time.time()
        closeness_centrality = nx.closeness_centrality(G, distance='length')
        stats['closeness_centrality'] = closeness_centrality
        stats['closeness_centrality_avg'] = sum(
            closeness_centrality.values()) / len(closeness_centrality)
        log('Calculated closeness centrality in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate node betweenness centrality
    if bc:
        # betweenness centrality of a node is the sum of the fraction of
        # all-pairs shortest paths that pass through node
        start_time = time.time()
        betweenness_centrality = nx.betweenness_centrality(G, weight='length')
        stats['betweenness_centrality'] = betweenness_centrality
        stats['betweenness_centrality_avg'] = sum(
            betweenness_centrality.values()) / len(betweenness_centrality)
        log('Calculated betweenness centrality in {:,.2f} seconds'.format(
            time.time() - start_time))

    log('Calculated extended stats in {:,.2f} seconds'.format(time.time() -
                                                              full_start_time))
    return stats
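# A minimal, self-contained sketch (toy data, not part of the original snippet)
# of the closeness-centrality averaging performed in the cc branch above; the
# 'length' edge attribute mirrors the distance key that extended_stats expects.
import networkx as nx

toy = nx.MultiDiGraph()
toy.add_edge('a', 'b', length=100)
toy.add_edge('b', 'c', length=50)
toy.add_edge('c', 'a', length=75)

toy_closeness = nx.closeness_centrality(toy, distance='length')
print(sum(toy_closeness.values()) / len(toy_closeness))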
    f = open('40_shortest_path.txt', 'w')
    f.writelines(s)
    f.close()

    # 4.1 centrality computing
    #   Degree
    #Compute the degree centrality for nodes.
    degree_centrality = nx.degree_centrality(soGraph.iniGraph())
    s = str(degree_centrality)
    f = open('41_degree_centrality.txt', 'w')
    f.writelines(s)
    f.close()

    #   Closeness
    # print(nx.closeness_centrality(soGraph.iniGraph()))            #Compute closeness centrality for nodes.
    closeness_centrality = nx.closeness_centrality(soGraph.iniGraph())
    s = str(closeness_centrality)
    f = open('42_closeness_centrality.txt', 'w')
    f.writelines(s)
    f.close()

    #   Betweenness
    # print(nx.betweenness_centrality(soGraph.iniGraph()))        #Compute the shortest-path betweenness centrality for nodes.
    # print(nx.edge_betweenness_centrality(soGraph.iniGraph()))    #Compute betweenness centrality for edges.
    betweenness_centrality = nx.betweenness_centrality(soGraph.iniGraph())
    s = str(betweenness_centrality)
    f = open('43_betweenness_centrality.txt', 'w')
    f.writelines(s)
    f.close()

    # 4.2 clustering
Example #53
# ### 4.4.2. Betweenness Centrality
# The assumption here is that important nodes connect other nodes.

# In[17]:

bc_nodes = nx.betweenness_centrality(H)
bc_nodes_sorted = sorted(bc_nodes.items(), key=lambda x: x[1], reverse=True)
print bc_nodes_sorted[:10]

# ### 4.4.3. Closeness Centrality
# The assumption here is that important nodes are close to each other.

# In[18]:

cc_nodes = nx.closeness_centrality(H)
cc_nodes_sorted = sorted(cc_nodes.items(), key=lambda x: x[1], reverse=True)
print cc_nodes_sorted[:10]

# ## 4.5. Community Detection
# A community in a social network is a sub-network with more intra-connectivity and less inter-connectivity with other communities.<br>
# The **Girvan Newman algorithm** is used to detect communities in a network. It is based on *edge betweenness*: edges that connect two communities tend to have high betweenness, so the algorithm detects communities by repeatedly removing the edges with the highest betweenness (see the self-contained sketch after the helper below).
#
# Let us use this community detection concept to form more than 200 groups.

# In[46]:


def edge_to_remove(G):
    eb_cent = nx.edge_betweenness_centrality(
        G)  # keys: edges as tuples; values: edge betweenness
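# A self-contained sketch (toy graph, not the notebook's own implementation) of
# the Girvan-Newman idea described above: repeatedly remove the edge with the
# highest edge betweenness until the graph splits into the desired number of
# communities.
import networkx as nx

def girvan_newman_split(graph, target_communities=2):
    g = graph.copy()
    while (nx.number_connected_components(g) < target_communities
           and g.number_of_edges() > 0):
        eb = nx.edge_betweenness_centrality(g)
        worst_edge = max(eb, key=eb.get)  # edge carrying the most shortest paths
        g.remove_edge(*worst_edge)
    return list(nx.connected_components(g))

print(girvan_newman_split(nx.barbell_graph(5, 1), target_communities=2))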
print(
    f"There are {graph.number_of_nodes()} nodes and {graph.number_of_edges()} \
edges present in the Graph after pruning edges")

if PARTITIONS:
    import metis
    colors = ['#f22613', '#3498db']
    (edgecuts, parts) = metis.part_graph(graph, 2)
    for i, p in enumerate(parts):
        graph.nodes[i]['group'] = p
        graph.nodes[i]['color'] = colors[p]

if CENTRALITIES:
    deg_centrality = nx.degree_centrality(graph)
    clos_centrality = nx.closeness_centrality(graph)
    betw_centrality = nx.betweenness_centrality(graph)

    nx.set_node_attributes(G=graph,
                           name='betw_centrality',
                           values=betw_centrality)
    nx.set_node_attributes(G=graph,
                           name='clos_centrality',
                           values=clos_centrality)
    nx.set_node_attributes(G=graph,
                           name='deg_centrality',
                           values=deg_centrality)

if PARTITIONS and PARTISANSHIP:
    for node in list(graph.nodes):
        deg_0 = 0
Example #55
    def test_duplicated_modification(self):
        G = nx.complete_graph(5, create_using=self.Graph)
        ret = nx.builtin.closeness_centrality(G)
        assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

        # test add node
        G.add_node(5)
        ret = nx.builtin.closeness_centrality(G)
        assert ret == {0: 0.8, 1: 0.8, 2: 0.8, 3: 0.8, 4: 0.8, 5: 0.0}

        # test add edge
        G.add_edge(4, 5)
        ret = nx.builtin.closeness_centrality(G)
        expect1 = {
            0: 0.8,
            1: 0.8,
            2: 0.8,
            3: 0.8,
            4: 0.8,
            5: 0.555556,
        }
        expect2 = {
            0: 0.833333,
            1: 0.833333,
            2: 0.833333,
            3: 0.833333,
            4: 1.0,
            5: 0.555556,
        }
        if G.is_directed():
            for n in ret:
                assert almost_equal(ret[n], expect1[n], places=4)
        else:
            for n in ret:
                assert almost_equal(ret[n], expect2[n], places=4)

        # test remove edge
        G.remove_edge(4, 5)
        ret = nx.builtin.closeness_centrality(G)
        assert ret == {0: 0.8, 1: 0.8, 2: 0.8, 3: 0.8, 4: 0.8, 5: 0.0}

        # test remove node
        G.remove_node(5)
        ret = nx.builtin.closeness_centrality(G)
        assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

        # test update
        for e in G.edges:
            G.edges[e]["weight"] = 2
        ret = nx.builtin.closeness_centrality(G, distance="weight")
        assert ret == {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5}

        # test copy
        G2 = G.copy()
        ret = nx.builtin.closeness_centrality(G2)
        assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

        # test reverse
        if G.is_directed():
            rG = G.reverse()
            ret = nx.builtin.closeness_centrality(rG)
            assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

        # to_directed/to_undirected
        if G.is_directed():
            udG = G.to_undirected()
            ret = nx.builtin.closeness_centrality(udG)
            assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
        else:
            dG = G.to_directed()
            ret = nx.builtin.closeness_centrality(dG)
            assert ret == {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}

        # sub_graph
        sG = G.subgraph([0, 1, 2])
        ret = nx.builtin.closeness_centrality(sG)
        assert ret == {0: 1.0, 1: 1.0, 2: 1.0}

        esG = G.edge_subgraph([(0, 1), (1, 2), (2, 3)])
        ret = nx.builtin.closeness_centrality(esG)
        expect1 = {
            0: 0.000,
            1: 0.333333,
            2: 0.444444,
            3: 0.500,
        }
        expect2 = {
            0: 0.5,
            1: 0.75,
            2: 0.75,
            3: 0.5,
        }
        if G.is_directed():
            for n in ret:
                assert almost_equal(ret[n], expect1[n], places=4)
        else:
            for n in ret:
                assert almost_equal(ret[n], expect2[n], places=4)
def undirected_page_rank(q, D, p, sim, threshold, selection, priors=None):

    topic_content = q_topics_dict[q]
    topic_string = '' + topic_content.title + ' ' + topic_content.desc + ' ' + topic_content.narr
    topic_processed = preprocessing(topic_string)

    # Pre-select p documents to be ranked by page rank
    if selection == 'bm25':
        docs, _, _ = bm25(topic_processed, D, p)
    elif selection == 'svm':
        docs, _, _ = ranking(topic_processed, D, p)
    elif selection == 'rrf':
        docs_bm, all_bm, _ = bm25(topic_processed, D, p)
        docs_svm, all_svm, _ = ranking(topic_processed, D, p)
        docs = rrf_score(list(dict.fromkeys(all_bm + all_svm)), all_svm,
                         all_bm, p)
    elif selection == 'boolean':
        docs = boolean_query(topic_processed, D,
                             3)[:p]  #choose to use query of k = 3

    selected_col = {}
    for doc_score in docs:
        if selection != 'boolean':
            doc = doc_score[0]
        else:
            doc = doc_score
        selected_col[doc] = D[doc]

    # Build graph for selected documents
    G = build_graph(selected_col, sim, threshold)

    # Rank documents based on built graph and use corresponding priors
    if priors == 'bm25' and selection == 'bm25':
        page_scores = nx.pagerank(G, personalization=dict(docs), max_iter=100)
    elif priors == 'bm25':
        docs, _, _ = bm25(topic_processed, D, p)
        page_scores = nx.pagerank(G, personalization=dict(docs), max_iter=100)
    elif priors == 'rrf' and selection == 'rrf':
        page_scores = nx.pagerank(G, personalization=dict(docs), max_iter=100)
    elif priors == 'rrf':
        docs_bm, all_bm, _ = bm25(topic_processed, D, p)
        docs_svm, all_svm, _ = ranking(topic_processed, D, p)
        docs = rrf_score(list(dict.fromkeys(all_bm + all_svm)), all_svm,
                         all_bm, p)
        page_scores = nx.pagerank(G, personalization=dict(docs), max_iter=100)
    elif priors == 'degree_cent':
        deg_centrality = nx.degree_centrality(G)
        if sum(deg_centrality.values()) != 0:
            page_scores = nx.pagerank(G,
                                      personalization=deg_centrality,
                                      max_iter=100)
        else:
            page_scores = nx.pagerank(G)
    elif priors == 'close_cent':
        close_centrality = nx.closeness_centrality(G)
        if sum(close_centrality.values()) != 0:
            page_scores = nx.pagerank(G,
                                      personalization=close_centrality,
                                      max_iter=100)
        else:
            page_scores = nx.pagerank(G)
    elif priors == 'bet_cent':
        bet_centrality = nx.betweenness_centrality(G,
                                                   normalized=True,
                                                   endpoints=False)
        if sum(bet_centrality.values()) != 0:
            page_scores = nx.pagerank(G,
                                      personalization=bet_centrality,
                                      max_iter=100)
        else:
            page_scores = nx.pagerank(G)
    else:
        page_scores = nx.pagerank(G)

    docs_id_sorted = sorted(page_scores, key=page_scores.get, reverse=True)

    res = [(doc_id, page_scores[doc_id]) for doc_id in docs_id_sorted]

    return res[:p]
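# Self-contained sketch (toy graph, not the project's retrieval pipeline) of the
# 'close_cent' branch above: closeness centrality used as PageRank priors via
# the personalization argument.
import networkx as nx

toy = nx.karate_club_graph()
close_centrality = nx.closeness_centrality(toy)
if sum(close_centrality.values()) != 0:
    scores = nx.pagerank(toy, personalization=close_centrality, max_iter=100)
else:
    scores = nx.pagerank(toy)
print(sorted(scores, key=scores.get, reverse=True)[:5])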
Example #57
if input == 'b':
    bet_cen = nx.betweenness_centrality(G, weight="weight", normalized=False)
    y = max(bet_cen.items(), key=operator.itemgetter(1))[0]
    print(y)
    print("# Betweenness centrality:" + str(bet_cen))
    for node in G:
        if bet_cen[node] == bet_cen[y]:
            color_map.append('blue')
            node_sizes.append(750)
        else:
            color_map.append('red')
            node_sizes.append(250)

if input == 'c':
    clo_cen = nx.closeness_centrality(G, distance="weight")
    z = max(clo_cen.items(), key=operator.itemgetter(1))[0]
    print(z)
    print("# Closeness centrality:" + str(clo_cen))
    for node in G:
        if clo_cen[node] == clo_cen[z]:
            color_map.append('blue')
            node_sizes.append(750)
        else:
            color_map.append('red')
            node_sizes.append(250)

pos = nx.get_node_attributes(G, 'pos')
nx.draw(G, pos, node_size=node_sizes, with_labels=True, node_color=color_map)
labels = nx.get_edge_attributes(G, 'weight')
nx.draw_networkx_edge_labels(G, pos, node_size=node_sizes, edge_labels=labels)
Example #58
print(nx.info(g))

plt.figure(figsize=(20, 20))
pos = nx.spring_layout(g, k=0.15)
nx.draw_networkx(g, pos, node_size=25, node_color='Green')
plt.show()
g = nx.erdos_renyi_graph(10, 0.4)

# Average clustering

cc = nx.average_clustering(g)

# closeness centrality

closeness = nx.closeness_centrality(g)

# cluster coefficient

cluster_coeff = nx.clustering(g)

# Degree Centrality
## importing rotues.dat file
G = pd.read_table(
    "D:\\Data Science\\python_codes\\Network Analytics\\routes.dat", sep=",")
G = G.iloc[:, 1:10]

g = nx.Graph()

g = nx.from_pandas_edgelist(G,
                            source='Source Airport',
Example #59
def main():
	
	############################################################
	#			Reading the twitter data, converting it into a digraph
	di_graph = nx.read_edgelist("twitter_combined.txt", create_using=nx.DiGraph(), nodetype=int)

	# Getting the number of nodes in di_graph
	num_of_nodes = di_graph.number_of_nodes()
	############################################################



	############################################################
	# 			Defining Arrays later to be used for plotting
	copeland_scores = []
	degree_ratios = []
	c_centralities = []
	c_centralities_dict = {}
	b_centralities = []
	b_centralities_dict = {}
	############################################################


	############################################################
	#			Calculating Copeland Score and Degree Ratio	
	for g in di_graph:
		in_degree = di_graph.in_degree(g)
		out_degree = di_graph.out_degree(g)

		copeland = out_degree - in_degree
		degree = round((out_degree + 1)/(in_degree + 1), 3)
		
		copeland_scores.append(copeland)
		degree_ratios.append(degree)		
	############################################################


	############################################################
	#			Plotting Copeland Score and Degree Ratio Histograms

	plot_degree_dist_2(copeland_scores, 'Copeland Score Histogram')
	plot_degree_dist_2(degree_ratios, 'Degree Ratio Histogram')
	############################################################


	############################################################
	#			Plotting Betweenness Centrality Histogram

	# Calculating b centralities using a networkx function
	b_centrality_dict = nx.betweenness_centrality(di_graph, k=int(num_of_nodes/16))	

	# Writing Betweenness Centrality Values to a file
	f = open('b_centrality_values.txt', 'w')
	for key, val in b_centrality_dict.items():
		f.write(str(key) + ' ' + str(val))
		f.write('\n')
	f.close()

	# Reading B Centrality values from a file
	with open('b_centrality_values.txt', 'r') as b_file:
		line = b_file.readlines()
		for l in line:
			el = l.split()
			b_centralities.append(float(el[1]))

			b_centralities_dict[el[0]] = el[1]


	for i in range(0, len(b_centralities)):
		print(b_centralities[i])



	plot_degree_dist_2(b_centralities, 'Betweenness Centrality Histogram')
	############################################################
	


	############################################################
	# 			Plotting Closeness Centrality Histogram

	# Calculating c centralities using a networkx function
	c_centrality_dict = nx.closeness_centrality(di_graph)
	
	# Writing Closeness Centrality Values to a file
	f = open('c_centrality_values.txt', 'w')
	for key, val in c_centrality_dict.items():
		f.write(str(key) + ' ' + str(val))
		f.write('\n')
	f.close()
		


	# Reading Closeness Centrality Values from a file
	with open('c_centrality_values.txt', 'r') as c_file:
		line = c_file.readlines()
		for l in line:
			el = l.split()
			c_centralities.append(float(el[1]))

			c_centralities_dict[el[0]] = el[1]

	plot_degree_dist_2(c_centralities, 'Closeness Centrality Histogram')
	############################################################


	############################################################
	#			Mean, Median, and SD for Degree Ratio, Copeland Score, 
	#			and C Centrality for highest B Centrality 
	b_centralities_high = []

	copeland_scores_high_bc = []
	degree_ratios_high_bc = []
	c_centralities_high_bc = []



	for key, val in b_centralities_dict.items():
		if float(val) > 0.00002:
			b_centralities_high.append(key)


	for g in di_graph:
		if str(g) in b_centralities_high:
			in_degree = di_graph.in_degree(g)
			out_degree = di_graph.out_degree(g)

			copeland = out_degree - in_degree
			degree = round((out_degree + 1)/(in_degree + 1), 3)

			copeland_scores_high_bc.append(copeland)
			degree_ratios_high_bc.append(degree)
			c_centralities_high_bc.append(float(c_centralities_dict[str(g)]))


	# Mean, Median, and SD of Copeland Score
	mean_copeland = mean(copeland_scores_high_bc)
	median_copeland = median(copeland_scores_high_bc)
	std_dev_copeland = stdev(copeland_scores_high_bc)
	print(mean_copeland)
	print(median_copeland)
	print(std_dev_copeland)
	print(copeland_scores_high_bc)

	print()
	print()
	print()

	# Mean, Median, and SD of Degree Ratio
	mean_degree = mean(degree_ratios_high_bc)
	median_degree = median(degree_ratios_high_bc)
	std_dev_degree = stdev(degree_ratios_high_bc)

	print(mean_degree)
	print(median_degree)
	print(std_dev_degree)
	print(degree_ratios_high_bc)

	print()
	print()
	print()

	# Mean, Median, and SD of Closeness Centrality
	mean_closeness = mean(c_centralities_high_bc)
	median_closeness = median(c_centralities_high_bc)
	std_dev_closeness = stdev(c_centralities_high_bc)

	print(mean_closeness)
	print(median_closeness)
	print(std_dev_closeness)
	print(c_centralities_high_bc)
	############################################################



	############################################################
	# 			Triadic Census
	print(nx.triadic_census(di_graph))
def node_and_article_feature(df, g):
    """
    prepare node features (recursive term centrality based on article, author, affiliation, closeness centrality)
    :param df: dataset in dataframe
    :param g: graph
    :return: prepared node feature dataframe
    """
    article_set = []
    node_set = []
    y_weight = []
    closeness = []
    author_set = []
    affiliation_1_set = []
    affiliation_2_set = []
    country_set = []
    for nd, row in g.nodes(data=True):
        node_set.append(nd)
        y_weight.append(len(row['year']))
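        # Added note: nx.closeness_centrality(g, nd) returns the closeness of the
        # single node nd; calling nx.closeness_centrality(g) once outside the loop
        # and indexing the returned dict would give the same values with far fewer
        # shortest-path computations.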
        closeness.append(nx.closeness_centrality(g, nd))
        for art_s in row['art_id']:
            article_set.append(art_s)
        for author_s in row['author']:
            author_set.append(author_s)
        for affiliation_1_s in row['affiliation_1']:
            affiliation_1_set.append(affiliation_1_s)
        for affiliation_2_s in row['affiliation_2']:
            affiliation_2_set.append(affiliation_2_s)
        for country_s in row['country']:
            country_set.append(country_s)
    article_set = set(article_set)
    author_set = set(author_set)
    affiliation_1_set = set(affiliation_1_set)
    affiliation_2_set = set(affiliation_2_set)
    country_set = set(country_set)
    article_index = list(article_set)
    author_index = list(author_set)
    affiliation_1_index = list(affiliation_1_set)
    affiliation_2_index = list(affiliation_2_set)
    country_index = list(country_set)
    node_index = list(node_set)
    td = np.zeros((len(g.nodes()), len(article_set)),dtype='uint8')
    ta = np.zeros((len(g.nodes()), len(author_set)),dtype='uint8')
    taf1 = np.zeros((len(g.nodes()), len(affiliation_1_set)))
    taf2 = np.zeros((len(g.nodes()), len(affiliation_2_set)))
    tc = np.zeros((len(g.nodes()), len(country_set)))
    for nd, art in g.nodes(data='art_id'):
        for art_s in art:
            td[node_index.index(nd)][article_index.index(art_s)] = 1
    for nd, aut in g.nodes(data='author'):
        for aut_s in aut:
            ta[node_index.index(nd)][author_index.index(aut_s)] = 1
    for nd, af1 in g.nodes(data='affiliation_1'):
        for af1_s in af1:
            taf1[node_index.index(nd)][affiliation_1_index.index(af1_s)] = 1
    for nd, af2 in g.nodes(data='affiliation_2'):
        for af2_s in af2:
            taf2[node_index.index(nd)][affiliation_2_index.index(af2_s)] = 1
    for nd, co in g.nodes(data='country'):
        for co_s in co:
            tc[node_index.index(nd)][country_index.index(co_s)] = 1

    # document & author centrality feature
    car, cnar = document_centrality(td, 20)
    cat, cnat = document_centrality(ta, 20)
    caf1, cnaf1 = document_centrality(taf1, 20)
    caf2, cnaf2 = document_centrality(taf2, 20)
    cou, cnou = document_centrality(tc, 20)
    node_feature = pd.DataFrame({'node_index': node_index, 'y_weight': y_weight,
                                 'term_art': cnar,
                                 'term_aut': cnat,
                                 'term_af1': cnaf1,
                                 'term_af2': cnaf2,
                                 'term_coun': cnou,
                                 'closeness': closeness})
    return node_feature