def plot_centralities(self, path):
    def sort_dict(A):
        return [sorted(A[i].iteritems(), key=operator.itemgetter(1), reverse=True) for i in range(len(A))]

    whole = nx.compose_all(self.__chapters_graphs)
    number_of_chapters = len(self.__chapters_graphs)

    vitalities = [nx.closeness_vitality(self.__chapters_graphs[i]) for i in range(len(self.__chapters_graphs))]
    degree_centralities = [nx.degree_centrality(self.__chapters_graphs[i]) for i in range(len(self.__chapters_graphs))]
    closeness_centralities = [nx.closeness_centrality(self.__chapters_graphs[i]) for i in range(len(self.__chapters_graphs))]
    # eigenvector_centralities = [nx.eigenvector_centrality_numpy(self.__chapters_graphs[i]) for i in range(len(self.__chapters_graphs))]

    whole_vitalities = nx.closeness_vitality(whole)
    whole_degree_centralities = nx.degree_centrality(whole)
    whole_closeness_centralities = nx.closeness_centrality(whole)
    # whole_eigenvector_centralities = nx.eigenvector_centrality_numpy(whole)

    # vitalities = sort_dict(vitalities)
    # degree_centralities = sort_dict(degree_centralities)
    # closeness_centralities = sort_dict(closeness_centralities)
    # eigenvector_centralities = sort_dict(eigenvector_centralities)

    whole_vitalities = sorted(whole_vitalities.iteritems(), key=operator.itemgetter(1), reverse=True)
    whole_degree_centralities = sorted(whole_degree_centralities.iteritems(), key=operator.itemgetter(1), reverse=True)
    whole_closeness_centralities = sorted(whole_closeness_centralities.iteritems(), key=operator.itemgetter(1), reverse=True)
    # whole_whole_eigenvector_centralities = sorted(whole_eigenvector_centralities.iteritems(), key=operator.itemgetter(1), reverse=True)

    bests = [x[0] for x in whole_degree_centralities[:5]]
    vitality_values = [[vitality[best] if best in vitality else 0 for vitality in vitalities] for best in bests]
    degree_centralities_values = [[centrality[best] if best in centrality else 0 for centrality in degree_centralities] for best in bests]
    closeness_centralities_values = [[centrality[best] if best in centrality else 0 for centrality in closeness_centralities] for best in bests]
    # eigenvector_centralities_values = [[centrality[best] if best in centrality else 0 for centrality in eigenvector_centralities] for best in bests]

    ylabels = ["vitality", "degree centrality", "closeness centrality"]
    whole_values = [vitality_values, degree_centralities_values, closeness_centralities_values]
    for values, ylabel in zip(whole_values, ylabels):
        legend_handlers = []
        for i in range(len(bests)):
            tmp, = plt.plot(range(number_of_chapters), values[i], label=bests[i])
            legend_handlers.append(tmp)
        # plt.xlim([plt.xlim()[0]*1.2, plt.xlim()[1]*1.2])
        plt.ylim([plt.ylim()[0]*1.2, plt.ylim()[1]*1.2])
        plt.xlabel("chapter")
        plt.ylabel(ylabel)
        plt.legend(handles=legend_handlers)
        plt.savefig(os.path.join(self.__result_dir, path, 'change of {} in chapters'.format(ylabel)))
        plt.clf()
def f40(self):
    start = 0
    v = nx.closeness_vitality(self.G).values()
    res = sum(v) / len(v)
    stop = 0
    # self.feature_time.append(stop - start)
    return res
def analyze_graph(G):
    # centralities and node metrics
    out_degrees = G.out_degree()
    in_degrees = G.in_degree()
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    avg_neighbour_degree = nx.average_neighbor_degree(G)
    redundancy = bipartite.node_redundancy(G)
    load = nx.load_centrality(G)
    hits = nx.hits(G)
    vitality = nx.closeness_vitality(G)

    for name in G.nodes():
        G.node[name]['out_degree'] = out_degrees[name]
        G.node[name]['in_degree'] = in_degrees[name]
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['avg-neigh-degree'] = avg_neighbour_degree[name]
        G.node[name]['redundancy'] = redundancy[name]
        G.node[name]['load'] = load[name]
        G.node[name]['hits'] = hits[name]
        G.node[name]['vitality'] = vitality[name]

    # communities
    partitions = community.best_partition(G)
    for member, c in partitions.items():
        G.node[member]['community'] = c

    return G
def vitality(self):
    rslt = {}
    rslt['closeness_vitality'] = nx.closeness_vitality(self.graph)

    fname_vitality = self.DIR + '/vitality.json'
    with open(fname_vitality, "w") as f:
        json.dump(rslt, f, cls=SetEncoder, indent=2)
    print(fname_vitality)
def test_disconnecting_graph(self):
    """Tests that the closeness vitality of a node whose removal
    disconnects the graph is negative infinity.

    """
    G = nx.path_graph(3)
    assert_equal(nx.closeness_vitality(G, node=1), -float('inf'))
def test_disconnecting_graph(self):
    """Tests that the closeness vitality of a node whose removal
    disconnects the graph is negative infinity.

    """
    G = nx.path_graph(3)
    assert nx.closeness_vitality(G, node=1) == -float('inf')
def add_graph_infra(self, graph, infra_rate, infra_mode):
    node_ids_net = self.select_node_ids(graph, label='network')
    number_nodes = int(math.ceil(len(node_ids_net) * infra_rate))

    if infra_mode == 'vitality':
        metrics = nx.closeness_vitality(graph)
        sorted_metrics = sorted(metrics, key=metrics.get, reverse=True)
        ids_infra = [
            x for x in sorted_metrics if graph.node[x]['label'] == 'network'
        ]
        ids_infra = ids_infra[:number_nodes]
        # ids_infra = sorted_metrics[:number_nodes]
    elif infra_mode == 'centrality':
        metrics = nx.closeness_centrality(graph)
        sorted_metrics = sorted(metrics, key=metrics.get, reverse=True)
        # ids_infra = sorted_metrics[:number_nodes]
        ids_infra = [
            x for x in sorted_metrics if graph.node[x]['label'] == 'network'
        ]
        ids_infra = ids_infra[:number_nodes]
    else:
        ids_infra = choice(node_ids_net, size=number_nodes, replace=False)

    for node_id in ids_infra:
        infra_id = self.add_infra_node(graph)
        self.add_edge_net(graph, node_id, infra_id)
def vitality(G, x):
    """
    Args:
        G: NetworkX graph, current graph
        x: int, vertex to compute vitality on
    Returns:
        float, closeness vitality for x
    """
    return nx.closeness_vitality(G, node=x, weight="weight", wiener_index=None)
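# A minimal usage sketch of the wrapper above (the triangle graph here is a made-up
# illustration, not part of the original source). On a triangle whose edges all have
# weight 2, removing any node drops the weighted Wiener index from 6 to 2, so each
# node's closeness vitality is 4, matching the weighted test cases elsewhere in this file.
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 2.0), (1, 2, 2.0), (2, 0, 2.0)])
print(vitality(G, 0))  # expected: 4.0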
def compute_features(self):

    # distribution of vitality
    self.add_feature(
        "vitality",
        lambda graph: list(nx.closeness_vitality(graph).values()),
        "The closeness vitality of a node is the change in the sum of distances "
        "between all node pairs when excluding that node",
        InterpretabilityScore(3),
        statistics="centrality",
    )
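# A quick sanity check of that definition (a hypothetical illustration, not part of the
# snippet above). On the path graph 0-1-2 the sum of pairwise distances is 1 + 1 + 2 = 4;
# dropping an endpoint leaves a single edge (sum 1), while dropping the middle node
# disconnects the graph, so its vitality is -inf, as in the disconnecting-graph tests above.
import networkx as nx

G = nx.path_graph(3)
print(nx.closeness_vitality(G))  # roughly {0: 3.0, 1: -inf, 2: 3.0} on recent NetworkX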
def measures(self, measure):
    mes = None
    if measure == 'neigh_degree':
        mes = nx.average_neighbor_degree(self.graph)
    elif measure == 'vitality':
        mes = nx.closeness_vitality(self.graph)
    elif measure == 'centrality':
        mes = nx.closeness_centrality(self.graph)
    elif measure == 'betweeness':
        mes = nx.betweenness_centrality(self.graph)
    elif measure == 'degree':
        mes = nx.degree(self.graph)
    return mes
def vitality(G):
    newgraph(G)
    selected_node = int(
        input("Enter the vitality of the node you wish to see: "))
    print(nx.closeness_vitality(G, selected_node))

    # shows graph with the node removed
    G.remove_node(selected_node)
    pos = nx.spring_layout(G)
    nx.draw(G, pos, with_labels=True, node_color='b', edge_color='k',
            node_size=200, alpha=0.5)
    pylab.title('Self_Define Net', fontsize=15)
    pylab.show()
def get_graph_metric(G, metric, extra_weighting_values="None"):
    """
    Function to get many of the graph theory metrics we are interested in using Networkx.
    Current metrics that can be obtained include: eigenvector centrality, weighted clustering,
    katz centrality, current flow closeness centrality, pagerank, closeness vitality.

    :param G: graph for a team
    :param metric: what metric to analyze
    :param extra_weighting_values: to additionally weight the output by another metric,
        pass that metric's node-attribute name; each player is then weighted by
        that player's value / sum of all values
    :return: all_ec, output
        all_ec: a list of values for all players, without labels
        output: dict with each player as a key
    """
    if metric == "eigenvector_centrality":
        output = nx.eigenvector_centrality(G, weight='weight', max_iter=500)
    elif metric == "weighted_clustering":
        output = nx.algorithms.cluster.clustering(G, weight='weight')
    elif metric == "katz_centrality":
        output = nx.katz_centrality(G, max_iter=1000)
    elif metric == "current_flow_closeness_centrality":
        output = nx.current_flow_closeness_centrality(G, weight='weight')
    elif metric == "pagerank":
        output = nx.pagerank_numpy(G, weight='weight')
    elif metric == "closeness_vitality":
        output = nx.closeness_vitality(G, weight='weight')
    else:
        print('Need to give valid metric')

    if extra_weighting_values != "None":
        from_node = nx.get_node_attributes(G, extra_weighting_values)
        weighted_val = [from_node[i] for i in list(from_node)]
        weighted_val = weighted_val / np.sum(weighted_val)

    all_ec = []
    for j, i in enumerate(output.keys()):
        if extra_weighting_values != "None":
            all_ec.append(output[i] * weighted_val[j])
        else:
            all_ec.append(output[i])

    return all_ec, output
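# A minimal usage sketch of get_graph_metric, assuming the function above is in scope;
# the passing-network graph and the 'minutes' node attribute below are made up for
# illustration and are not from the original source.
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([("A", "B", 5), ("B", "C", 3), ("A", "C", 1)])

# Plain closeness vitality per player.
values, per_player = get_graph_metric(G, "closeness_vitality")

# Additionally reweight each player's score by a node attribute such as minutes played.
nx.set_node_attributes(G, {"A": 90, "B": 60, "C": 30}, name="minutes")
weighted_values, _ = get_graph_metric(G, "closeness_vitality", extra_weighting_values="minutes")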
def analysis(self):
    avnd = nx.average_neighbor_degree(self.graph)
    vit = nx.closeness_vitality(self.graph)
    cent = nx.closeness_centrality(self.graph)
    bet = nx.betweenness_centrality(self.graph)
    clus = nx.clustering(self.graph)
    # ecc = nx.eccentricity(self.graph)

    for node, data in self.graph.nodes_iter(data=True):
        label = data['label']
        if label == 'infra' or label == 'network':
            cap = {
                # 'eccentricity': ecc[node],
                'betweenness': bet[node],
                'centrality': cent[node],
                'vitality': vit[node],
                'avg_neigh_degree': avnd[node],
                'clustering': clus[node]
            }
            if 'capabilities' in data:
                data['capabilities']['analysis'] = cap

    self.cohesion_index()
def node_attributes(self):
    result = {}
    result['degree_centrality'] = nx.degree_centrality(self.graph)
    result['in_degree_centrality'] = nx.in_degree_centrality(self.graph)
    result['out_degree_centrality'] = nx.out_degree_centrality(self.graph)
    result['closeness_centrality'] = nx.closeness_centrality(self.graph)
    result['betweenness_centrality'] = nx.betweenness_centrality(self.graph)
    result['load_centrality'] = nx.load_centrality(self.graph)
    result['average_neighbor_degree'] = nx.average_neighbor_degree(self.graph)
    result['square_clustering'] = nx.square_clustering(self.graph)
    result['closeness_vitality'] = nx.closeness_vitality(self.graph)

    # nodes attributes
    node_attributes = []
    for node in self.graph.nodes():
        node_attributes.append((node,
                                result['degree_centrality'][node],
                                result['in_degree_centrality'][node],
                                result['out_degree_centrality'][node],
                                result['closeness_centrality'][node],
                                result['betweenness_centrality'][node],
                                result['load_centrality'][node],
                                result['average_neighbor_degree'][node],
                                result['square_clustering'][node],
                                result['closeness_vitality'][node]))

    node_attributes.insert(0, [
        'node', 'degree_centrality', 'in_degree_centrality',
        'out_degree_centrality', 'closeness_centrality',
        'betweenness_centrality', 'load_centrality',
        'average_neighbor_degree', 'square_clustering', 'closeness_vitality'
    ])
    return node_attributes
def __init__(self, graphml, minThreshold, forwardDecay, reverseDecay, modifiedNodes,
             addedNodes, removedNodes, startingNodes, modifiedEdges, spreadModels,
             probabilityModels):
    self.__G = nx.read_graphml(graphml)
    self.__minThreshold = minThreshold
    self.__forwardDecay = forwardDecay
    self.__reverseDecay = reverseDecay
    self.__modifiedNodes = modifiedNodes
    self.__addedNodes = addedNodes
    self.__removedNodes = removedNodes
    self.__startingNodes = startingNodes
    self.__modifiedEdges = modifiedEdges
    self.__spreadModels = spreadModels
    self.__probabilityModels = probabilityModels

    G = self.__G
    self.__closeness = nx.closeness_vitality(G)  # closeness vitality: how the whole graph changes after removing the node
    value = self.__closeness.values()
    tmp_close = 0
    for k in value:
        tmp_close = tmp_close + float(k)
    self.__avg_close = tmp_close / float(len(G.nodes()))

    degree_all = 0
    self.__betw = nx.betweenness_centrality(G, k=None, normalized=True, weight=None)
    total = 0
    for i in G.nodes():
        total = total + self.__betw[i]
    self.__average_betweenness = float(total) / len(G.nodes())

    self.__de = G.degree()
    for n in G.nodes():
        degree_all = degree_all + self.__de[n]
    self.__avg_degree = float(degree_all) / len(G.nodes())
def test_closeness_vitality_weighted_multidigraph(self):
    G = nx.MultiDiGraph()
    G.add_cycle([0, 1, 2], weight=2)
    v = nx.closeness_vitality(G, weight='weight')
    assert_equal(v, {0: 16.0, 1: 16.0, 2: 16.0})
def test_closeness_vitality_weighted(self):
    G = nx.Graph()
    G.add_cycle([0, 1, 2], weight=2)
    v = nx.closeness_vitality(G, weight='weight')
    assert_equal(v, {0: 8.0, 1: 8.0, 2: 8.0})
def test_unweighted(self):
    G = nx.cycle_graph(3)
    vitality = nx.closeness_vitality(G)
    assert vitality == {0: 2, 1: 2, 2: 2}
def test_weighted_multidigraph(self):
    G = nx.MultiDiGraph()
    nx.add_cycle(G, [0, 1, 2], weight=2)
    nx.add_cycle(G, [2, 1, 0], weight=2)
    vitality = nx.closeness_vitality(G, weight='weight')
    assert vitality == {0: 8, 1: 8, 2: 8}
def test_unweighted(self):
    G = nx.cycle_graph(3)
    vitality = nx.closeness_vitality(G)
    assert_equal(vitality, {0: 2, 1: 2, 2: 2})
def test_weighted_multidigraph(self):
    G = nx.MultiDiGraph()
    G.add_cycle([0, 1, 2], weight=2)
    G.add_cycle([2, 1, 0], weight=2)
    vitality = nx.closeness_vitality(G, weight='weight')
    assert_equal(vitality, {0: 8, 1: 8, 2: 8})
def test_weighted(self):
    G = nx.Graph()
    G.add_cycle([0, 1, 2], weight=2)
    vitality = nx.closeness_vitality(G, weight='weight')
    assert_equal(vitality, {0: 4, 1: 4, 2: 4})
def vitality(self):
    """compute the n nodes with highest vitality"""
    vit_hash = nx.closeness_vitality(self.graph)
    vit_nodes = self._annotate_graph(vit_hash, 'vitality')
    self.build_output_data(vit_nodes, 'vitality')
def test_closeness_vitality_weighted(self):
    G = nx.Graph()
    G.add_cycle([0, 1, 2], weight=2)
    v = nx.closeness_vitality(G, weight='weight')
    assert_equal(v, {0: 8.0, 1: 8.0, 2: 8.0})
    if (i != 8 and i != 53):  # exceptions
        g2 = nx.Graph()
        g2.add_edges_from(edges)
        g2.remove_node(i)
        vul = vul + [(i, nx.average_shortest_path_length(g2) - aspl)]

trivul = sorted(vul, reverse=True, key=operator.itemgetter(1))
for i in range(len(trivul)):
    print(" " + str(i + 1) + " " + cities[int(trivul[i][0])])

### TIM - WHEN U COME BACK TO THIS, START AT PART 4 (LINE 62)

# PART 4 HERE -- "04_closeness_vitality.py"
import numpy

vul2 = nx.closeness_vitality(g)
mvul2 = list(vul2.items())
avul2 = numpy.array(mvul2)
x = avul2[:, 1]

mbet = list(bet.items())
abet = numpy.array(mbet)
y = abet[:, 1]

from scipy import stats
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
r2 = r_value * r_value
print(r2)

### NOW YOU CAN FIDDLE AROUND ONCE YOU GET ALL THE ABOVE PARTS :p
def test_weighted_digraph(self):
    G = nx.DiGraph()
    nx.add_cycle(G, [0, 1, 2], weight=2)
    nx.add_cycle(G, [2, 1, 0], weight=2)
    vitality = nx.closeness_vitality(G, weight='weight')
    assert_equal(vitality, {0: 8, 1: 8, 2: 8})
def upload_file(request):
    f = request.FILES['ds']

    # little bit of hacking...
    format_type = 'json'
    try:
        ds = json.load(f)
    except:
        rows = csv.reader(f)
        ds = list()
        names = list()
        for row in rows:
            if len(names) == 0:
                for v in row:
                    names.append(v)
            else:
                idx = 0
                cur = dict()
                for v in row:
                    cur[names[idx]] = v
                    idx += 1
                ds.append(cur)
        format_type = 'csv'

    # Create network
    G = nx.Graph()

    # create date-centered / random id. not guaranteed to be unique. TODO change for scale.
    now = datetime.datetime.now()
    ds_id = "%d%d%d%d%d%d.%d" % (now.year, now.month, now.day, now.hour,
                                 now.minute, now.second, random.randint(0, 100000))

    # known formats.
    # based on collab2008.json
    if (type(ds) == type(dict()) and 'links' in ds and 'nodes' in ds
            and len(ds['links']) > 0 and len(ds['nodes']) > 0):
        idx = 0
        for node in ds['nodes']:
            G.add_node(idx, country_code=node['id'])
            idx += 1
        for link in ds['links']:
            G.add_edge(link['source'], link['target'], weight=link['weight'])

    # based on elena's airbnb data, formatted as csv, with columns:
    # ego_name, ego_lat, ego_lng, alter_name, alter_lat, alter_lng, weight
    elif (type(ds) == type(list()) and type(ds[0]) == type(dict())
            and 'ego_name' in ds[0] and 'alter_name' in ds[0]):
        node_names = set()
        name_to_ll = dict()
        for d in ds:
            node_names.add(d['ego_name'])
            node_names.add(d['alter_name'])
            name_to_ll[d['ego_name']] = {'lat': d['ego_lat'], 'lng': d['ego_lng']}
            name_to_ll[d['alter_name']] = {'lat': d['alter_lat'], 'lng': d['alter_lng']}
        nodemap = dict()
        idx = 0
        for node_name in node_names:
            if node_name not in nodemap:
                G.add_node(idx, name=node_name,
                           lat=name_to_ll[node_name]['lat'],
                           lng=name_to_ll[node_name]['lng'])
                nodemap[node_name] = idx
                idx += 1
        for d in ds:
            G.add_edge(nodemap[d['ego_name']], nodemap[d['alter_name']],
                       weight=int(d['weight']))
    else:
        return "ERROR_UNKNOWN_FORMAT"

    # Make sure that *every node* has a lat/lng
    no_geo = []  # maintain list of nodes removed
    ccode_to_ll = pickle.load(open('DATASETS/code_to_latlng.pkl', 'r'))
    for idx in G.node:
        # todo use HttpResponseBadRequest if no lat/lng exists
        if 'lat' not in G.node[idx] or 'lng' not in G.node[idx]:
            if 'country_code' in G.node[idx] and G.node[idx]['country_code'] in ccode_to_ll:
                c = G.node[idx]['country_code']
                G.node[idx]['lat'] = ccode_to_ll[c]['lat']
                G.node[idx]['lng'] = ccode_to_ll[c]['lng']
            else:
                no_geo.append(idx)

    # remove nodes with missing geo info
    for idx in no_geo:
        G.remove_node(idx)

    # Add *EXTRA* data. Not always guaranteed to be returned.
    ctor = pickle.load(open('DATASETS/country_to_continent.pkl', 'r'))
    code_to_country = pickle.load(open('DATASETS/code_to_country.pkl', 'r'))

    pp = pprint.PrettyPrinter(stream=sys.stderr)
    # pp.pprint(G.nodes(data=True))
    # pp.pprint(G.edges(data=True))

    closeness_vitality = nx.closeness_vitality(G)
    pagerank = nx.pagerank(G)
    degree_centrality = nx.degree_centrality(G)
    average_neighbor_degree = nx.average_neighbor_degree(G)

    for idx in G.node:
        if 'country_code' in G.node[idx] and G.node[idx]['country_code'] in ctor:
            G.node[idx]['region'] = ctor[G.node[idx]['country_code']]
        else:
            G.node[idx]['region'] = 'Unknown'
        if 'country_code' in G.node[idx] and G.node[idx]['country_code'] in code_to_country:
            G.node[idx]['country_name'] = code_to_country[G.node[idx]['country_code']]
        else:
            G.node[idx]['country_name'] = 'Unknown'

        G.node[idx]['closeness_vitality'] = closeness_vitality[idx]
        G.node[idx]['pagerank'] = pagerank[idx]
        G.node[idx]['degree'] = G.degree(idx)
        G.node[idx]['degree_centrality'] = degree_centrality[idx]
        G.node[idx]['average_neighbor_degree'] = average_neighbor_degree[idx]
        G.node[idx]['weight'] = G.degree(idx, 'weight')

        name = "Location: %.2f,%.2f" % (float(G.node[idx]['lat']), float(G.node[idx]['lng']))
        if 'name' in G.node[idx]:
            name += " (%s)" % G.node[idx]['name']
        elif 'country_name' in G.node[idx] and G.node[idx]['country_name'] != "Unknown":
            name += " (%s)" % G.node[idx]['country_name']
        G.node[idx]['name'] = name

    f = open("DATASETS/graph%s.pickle" % ds_id, 'w')
    pickle.dump(G, f)

    print >>sys.stderr, "UPLOAD COMPLETE. %d NODES IGNORED DUE TO MISSING GEO DATA." % len(no_geo)
    return ds_id
def test_closeness_vitality_weighted_digraph(self):
    G = nx.DiGraph()
    G.add_cycle([0, 1, 2], weight=2)
    v = nx.closeness_vitality(G, weighted_edges=True)
    assert_equal(v, {0: 16.0, 1: 16.0, 2: 16.0})
print k
print good_thing
print good_counts

nx.clustering(G)
nx.clustering(opt_G)
nx.shortest_path(opt_G)
nx.all_pairs_shortest_path(opt_G, cutoff=3)
nx.all_pairs_shortest_path(opt_G, cutoff=4)
nx.dijkstra_path(G_new, '26_BLUE BOTTLE COFFEE', '38_CHEESE BOARD PIZZA')
nx.dijkstra_path(G_new, '38_CHEESE BOARD PIZZA', '158_TILDEN REGIONAL PARK')
nx.dijkstra_path(G_new, '142_STABLE CAFE', '158_TILDEN REGIONAL PARK')
nx.dijkstra_path(G_new, '38_CHEESE BOARD PIZZA', '142_STABLE CAFE')
nx.closeness_vitality(opt_G, weight='haversine')
nx.dijkstra_path(opt_G, '26_BLUE BOTTLE COFFEE', '38_CHEESE BOARD PIZZA')
nx.dijkstra_path(opt_G, '38_CHEESE BOARD PIZZA', '132_REDWOOD REGIONAL PARK')
nx.dijkstra_path(opt_G, '142_STABLE CAFE', '132_REDWOOD REGIONAL PARK')
nx.dijkstra_path(opt_G, '38_CHEESE BOARD PIZZA', '142_STABLE CAFE')


def girvan_newman_step(G):
    '''
    INPUT: Graph G
    OUTPUT: None

    Run one step of the Girvan-Newman community detection algorithm.
    Afterwards, the graph will have one more connected component.
    '''
    init_ncomp = nx.number_connected_components(G)
def closeness_vitality_sum(self):
    if (self.closeness_vitality_dict == None):
        self.closeness_vitality_dict = nx.closeness_vitality(self.graph)
    return self.closeness_vitality_dict[self.node_1] + self.closeness_vitality_dict[self.node_2]
def test_unweighted_digraph(self):
    G = nx.DiGraph(nx.cycle_graph(3))
    vitality = nx.closeness_vitality(G)
    assert_equal(vitality, {0: 4, 1: 4, 2: 4})
def features_part2(info):
    """
    third set of features.
    """
    G = info['G']
    n = info['num_nodes']
    num_units = info['num_units']
    edges = info['edges']
    nedges = len(edges)

    H = G.to_undirected()
    res = dict()

    cc = nx.closeness_centrality(G)
    res['closeness_centrality'] = cc[n - 1]
    res['closeness_centrality_mean'] = np.mean(list(cc.values()))

    bc = nx.betweenness_centrality(G)
    res['betweenness_centrality_mean'] = np.mean(list(bc.values()))

    cfcc = nx.current_flow_closeness_centrality(H)
    res['current_flow_closeness_centrality_mean'] = np.mean(list(cfcc.values()))

    cfbc = nx.current_flow_betweenness_centrality(H)
    res['current_flow_betweenness_centrality_mean'] = np.mean(list(cfbc.values()))

    soc = nx.second_order_centrality(H)
    res['second_order_centrality_mean'] = np.mean(list(soc.values())) / n

    cbc = nx.communicability_betweenness_centrality(H)
    res['communicability_betweenness_centrality_mean'] = np.mean(list(cbc.values()))

    comm = nx.communicability(H)
    res['communicability'] = np.log(comm[0][n - 1])
    res['communicability_start_mean'] = np.log(np.mean(list(comm[0].values())))
    res['communicability_end_mean'] = np.log(np.mean(list(comm[n - 1].values())))

    res['radius'] = nx.radius(H)
    res['diameter'] = nx.diameter(H)
    res['local_efficiency'] = nx.local_efficiency(H)
    res['global_efficiency'] = nx.global_efficiency(H)
    res['efficiency'] = nx.efficiency(H, 0, n - 1)

    pgr = nx.pagerank_numpy(G)
    res['page_rank'] = pgr[n - 1]
    res['page_rank_mean'] = np.mean(list(pgr.values()))

    cnstr = nx.constraint(G)
    res['constraint_mean'] = np.mean(list(cnstr.values())[:-1])

    effsize = nx.effective_size(G)
    res['effective_size_mean'] = np.mean(list(effsize.values())[:-1])

    cv = np.array(list(nx.closeness_vitality(H).values()))
    cv[cv < 0] = 0
    res['closeness_vitality_mean'] = np.mean(cv) / n

    res['wiener_index'] = nx.wiener_index(H) / (n * (n - 1) / 2)

    A = nx.to_numpy_array(G)
    expA = expm(A)
    res['expA'] = np.log(expA[0, n - 1])
    res['expA_mean'] = np.log(np.mean(expA[np.triu_indices(n)]))

    return res
def closeness_vitality_sum(self):
    if (self.closeness_vitality_dict == None):
        self.closeness_vitality_dict = nx.closeness_vitality(self.graph)
    return self.closeness_vitality_dict[self.node_1] + self.closeness_vitality_dict[self.node_2]
def test_unweighted_digraph(self):
    G = nx.DiGraph(nx.cycle_graph(3))
    vitality = nx.closeness_vitality(G)
    assert vitality == {0: 4, 1: 4, 2: 4}
def vit(net):
    return ((nx.closeness_vitality(net), 'closeness'), )
def vitality(graph):
    """Return the closeness vitality of every node as a list."""
    return list(nx.closeness_vitality(graph).values())
def test_closeness_vitality_unweighted_digraph(self):
    G = nx.DiGraph()
    G.add_cycle([0, 1, 2])
    v = nx.closeness_vitality(G)
    assert_equal(v, {0: 8.0, 1: 8.0, 2: 8.0})
def test_weighted(self):
    G = nx.Graph()
    nx.add_cycle(G, [0, 1, 2], weight=2)
    vitality = nx.closeness_vitality(G, weight='weight')
    assert vitality == {0: 4, 1: 4, 2: 4}
def test_closeness_vitality_weighted_digraph(self):
    G = nx.DiGraph()
    G.add_cycle([0, 1, 2], weight=2)
    v = nx.closeness_vitality(G, weight=True)
    assert_equal(v, {0: 16.0, 1: 16.0, 2: 16.0})
def test_closeness_vitality_unweighted(self):
    G = nx.cycle_graph(3)
    v = nx.closeness_vitality(G)
    assert_equal(v, {0: 4.0, 1: 4.0, 2: 4.0})
def test_closeness_vitality_unweighted(self):
    G = nx.cycle_graph(3)
    v = nx.closeness_vitality(G)
    assert_equal(v, {0: 4.0, 1: 4.0, 2: 4.0})
    assert_equal(v[0], 4.0)
def vitality(self):
    """compute vitality"""
    vit_hash = nx.closeness_vitality(self.graph)
    vit_nodes = self._annotate_graph(vit_hash, 'vitality')
def closeness_vitality(g):
    # mean closeness vitality over all nodes, ignoring the -inf values produced by
    # nodes whose removal disconnects the graph (a list cannot be masked, so convert
    # to a numpy array first)
    v = np.array(list(nx.closeness_vitality(g).values()))
    return np.mean(v[v != -np.inf])
def vit(net):
    return ((nx.closeness_vitality(net), 'closeness'), )