def _communicability_to_anchors(graph, anchors, normalize=False):
    comm = {}
    for node, comm_dict in nx.communicability_exp(graph).items():
        d = np.array([comm_dict[a] for a in anchors])
        if normalize:
            d = normalized(d)
        comm[node] = d
    return comm
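A minimal, self-contained sketch of how the helper above might be exercised. The graph, the anchor choice, and the unit-norm `normalized` stand-in are illustrative assumptions; the repo's own `normalized` helper and calling code live elsewhere.

import networkx as nx
import numpy as np

def normalized(v: np.ndarray) -> np.ndarray:
    # Illustrative stand-in for the repo's normalized(): scale to unit L2 norm.
    norm = np.linalg.norm(v)
    return v if norm == 0 else v / norm

if __name__ == '__main__':
    G = nx.karate_club_graph()
    anchors = [0, 33]  # arbitrary anchor nodes for the example
    vectors = _communicability_to_anchors(G, anchors, normalize=True)
    print(vectors[5])  # communicability of node 5 to each anchor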
def visualize_communicability():
    if len(sys.argv) < 2:
        print(f'Usage: {sys.argv[0]} <network>')
        return

    net = fio.read_network(sys.argv[1])
    name = fio.get_network_name(sys.argv[1])
    communicability = nx.communicability_exp(net.G)
    # Per-node score: total communicability of each node to the rest of the network.
    scores = np.array([sum(communicability[u].values()) for u in communicability.keys()])

    plt.title(f'{name}\nCommunicability')
    plt.hist(scores)  # type: ignore
    plt.show(block=False)
    print(f'Network score: {np.sum(scores)}')

    # Min-max scale the scores so they can drive the node sizes in the plot.
    scores = (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
    node_size = 300 * scores
    visualize_network(net.G, net.layout, name, node_size=node_size)
def getHugeStats(g):
    if nx.is_directed(g):
        P1 = pd.DataFrame({
            'load_centrality': nx.load_centrality(g, weight='weight'),
            'betweenness_centrality': nx.betweenness_centrality(g, weight='weight'),
            'pagerank': pd.Series(nx.pagerank(g, alpha=0.85, personalization=None,
                                              max_iter=100, tol=1e-08, nstart=None,
                                              weight='weight')),
            'eigenvector_centrality': nx.eigenvector_centrality_numpy(g),
            'degree_centrality': pd.Series(nx.degree_centrality(g)),
            'in_degree_centrality': pd.Series(nx.in_degree_centrality(g)),
            'out_degree_centrality': pd.Series(nx.out_degree_centrality(g))})
    else:
        # Newer networkx returns iterators for the all-pairs routines, so they are
        # materialized with dict() before building the DataFrames. Note that
        # pd.Panel requires pandas < 1.0 (Panel was removed in later releases).
        P1 = pd.Panel({
            'spl': pd.DataFrame(dict(nx.shortest_path_length(g))),
            'apdp': pd.DataFrame(dict(nx.all_pairs_dijkstra_path(g))),
            'apdl': pd.DataFrame(dict(nx.all_pairs_dijkstra_path_length(g))),
            'c_exp': pd.DataFrame(nx.communicability_exp(g))})
    return P1
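A small usage sketch for the directed branch of getHugeStats; the random graph and its parameters are illustrative only, and the edges are unweighted, so weight='weight' falls back to a weight of 1 per edge.

import networkx as nx
import pandas as pd

if __name__ == '__main__':
    G = nx.gnp_random_graph(50, 0.1, directed=True, seed=42)
    stats = getHugeStats(G)
    print(stats.head())
    print(stats[['pagerank', 'in_degree_centrality']].corr())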
def save_communicabilities():
    network_classes = ('BarabasiAlbert(N=500,m=2)',
                       'BarabasiAlbert(N=500,m=3)',
                       'BarabasiAlbert(N=500,m=4)',
                       'ConnComm(N_comm=10,ib=(5, 10),num_comms=50,ob=(3, 6))',
                       'ConnComm(N_comm=20,ib=(15, 20),num_comms=25,ob=(3, 6))',
                       'ErdosRenyi(N=500,p=0.01)',
                       'ErdosRenyi(N=500,p=0.02)',
                       'ErdosRenyi(N=500,p=0.03)',
                       'WattsStrogatz(N=500,k=4,p=0.01)',
                       'WattsStrogatz(N=500,k=4,p=0.02)',
                       'WattsStrogatz(N=500,k=5,p=0.01)')
    rows = []
    for class_name in tqdm(network_classes):
        nets = fio.read_network_class(class_name)
        # Total communicability of each network: the sum over every (i, j) entry
        # of its communicability matrix.
        communicabilities = (sum(c
                                 for inner_values in nx.communicability_exp(net.G).values()
                                 for c in inner_values.values())
                             for net in tqdm(nets))
        rows.append([class_name])
        rows.append(list(map(str, communicabilities)))

    with open('communicabilities.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows(rows)
def infection_entropy_vs_communicability():
    classes = ('BarabasiAlbert(N=500,m=2)',
               'BarabasiAlbert(N=500,m=3)',
               'BarabasiAlbert(N=500,m=4)',
               'ConnComm(N_comm=10,ib=(5, 10),num_comms=50,ob=(3, 6))',
               'ConnComm(N_comm=20,ib=(15, 20),num_comms=25,ob=(3, 6))',
               'ErdosRenyi(N=500,p=0.01)',
               'ErdosRenyi(N=500,p=0.02)',
               'ErdosRenyi(N=500,p=0.03)',
               'WattsStrogatz(N=500,k=4,p=0.01)',
               'WattsStrogatz(N=500,k=4,p=0.02)',
               'WattsStrogatz(N=500,k=5,p=0.01)')
    n_bins = 100  # 1000 should give 1 decimal point of precision for percentages
    csv_rows: List[Union[List[str], List[int], List[float]]] = []
    for class_ in classes:
        rng = np.random.default_rng(777)
        nets = fio.read_network_class(class_)
        # Total communicability (sum over all matrix entries) of each network.
        communicabilities: List[float] = [
            sum(cell
                for row in nx.communicability_exp(net.G).values()
                for cell in row.values())
            for net in tqdm(nets, desc='Communicability')
        ]
        entropies = [
            calc_entropy(run_sim_batch(net, 500, sd.Disease(4, .3),
                                       sd.SimplePressureBehavior(net, rng, 2, .25),
                                       rng),
                         n_bins)
            for net in tqdm(nets, 'Simulations & Entropy')
        ]
        csv_rows.append(['Network Class', class_])
        csv_rows.append(['Communicability'])
        csv_rows.append(communicabilities)
        csv_rows.append(['Entropy'])
        csv_rows.append(entropies)

    with open(f'results/communicability-vs-infection-entropy-bins-{n_bins}.csv',
              'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows(csv_rows)
def dist(self, G1, G2): r"""Compares the communicability matrix of two graphs. This distance is based on the communicability matrix, :math:`C`, of a graph consisting of elements :math:`c_{ij}` which are values corresponding to the numbers of shortest paths of length :math:`k` between nodes :math:`i` and :math:`j`. The commmunicability matrix is symmetric, which means the communicability sequence is formed by flattening the upper triangular of :math:`C`, which is then normalized to create the communicability sequence, :math:`P`. The communicability sequence entropy distance between two graphs, `G1` and `G2`, is the Jensen-Shannon divergence between these communicability sequence distributions, :math:`P1` and :math:`P2` of the two graphs. Parameters ---------- G1, G2 (nx.Graph) two graphs Returns ------- dist (float) between zero and one, this is the communicability sequence distance bewtween `G1` and `G2`. Notes ----- This function uses the networkx approximation of the communicability of a graph, `nx.communicability_exp`, which requires `G1` and `G2` to be simple undirected networks. In addition to the final distance scalar, `self.results` stores the two vectors :math:`P1` and :math:`P2`, their mixed vector, :math:`P0`, and their associated entropies. References ---------- .. [1] Estrada, E., & Hatano, N. (2008). Communicability in complex networks. Physical Review E, 77(3), 036111. https://journals.aps.org/pre/abstract/10.1103/PhysRevE.77.036111 .. [2] Chen, D., Shi, D. D., Qin, M., Xu, S. M., & Pan, G. J. (2018). Complex network comparison based on communicability sequence entropy. Physical Review E, 98(1), 012319. """ N1 = G1.number_of_nodes() N2 = G2.number_of_nodes() C1 = nx.communicability_exp(G1) C2 = nx.communicability_exp(G2) Ca1 = np.zeros((N1, N1)) Ca2 = np.zeros((N2, N2)) for i in range(Ca1.shape[0]): Ca1[i] = np.array(list(C1[i].values())) for i in range(Ca2.shape[0]): Ca2[i] = np.array(list(C2[i].values())) lil_sigma1 = np.triu(Ca1).flatten() lil_sigma2 = np.triu(Ca2).flatten() big_sigma1 = sum(lil_sigma1[np.nonzero(lil_sigma1)[0]]) big_sigma2 = sum(lil_sigma2[np.nonzero(lil_sigma2)[0]]) P1 = lil_sigma1 / big_sigma1 P2 = lil_sigma2 / big_sigma2 P1 = np.array(sorted(P1)) P2 = np.array(sorted(P2)) dist = entropy.js_divergence(P1, P2) self.results['P1'] = P1 self.results['P2'] = P2 self.results['dist'] = dist return dist
def centrality(self):
    result = {}
    result['degree_centrality'] = nx.degree_centrality(self.graph)

    if self.directed == 'directed':
        result['in_degree_centrality'] = nx.in_degree_centrality(self.graph)
        result['out_degree_centrality'] = nx.out_degree_centrality(self.graph)

    result['closeness_centrality'] = nx.closeness_centrality(self.graph)
    result['betweenness_centrality'] = nx.betweenness_centrality(self.graph)

    # Stringify the edge-tuple keys, since tuples cannot be encoded as JSON keys.
    stringify_temp = {}
    temp = nx.edge_betweenness_centrality(self.graph)
    for key in temp.keys():
        stringify_temp[str(key)] = temp[key]
    result['edge_betweenness_centrality'] = stringify_temp

    if self.directed == 'undirected':
        result['current_flow_closeness_centrality'] = nx.current_flow_closeness_centrality(self.graph)
        result['current_flow_betweenness_centrality'] = nx.current_flow_betweenness_centrality(self.graph)

        # Same stringification for the edge-keyed current-flow results.
        stringify_temp = {}
        temp = nx.edge_current_flow_betweenness_centrality(self.graph)
        for key in temp.keys():
            stringify_temp[str(key)] = temp[key]
        result['edge_current_flow_betweenness_centrality'] = stringify_temp

        result['approximate_current_flow_betweenness_centrality'] = \
            nx.approximate_current_flow_betweenness_centrality(self.graph)
        result['eigenvector_centrality'] = nx.eigenvector_centrality(self.graph)
        result['eigenvector_centrality_numpy'] = nx.eigenvector_centrality_numpy(self.graph)
        result['katz_centrality'] = nx.katz_centrality(self.graph)
        result['katz_centrality_numpy'] = nx.katz_centrality_numpy(self.graph)
        result['communicability'] = nx.communicability(self.graph)
        result['communicability_exp'] = nx.communicability_exp(self.graph)
        result['communicability_centrality'] = nx.communicability_centrality(self.graph)
        result['communicability_centrality_exp'] = nx.communicability_centrality_exp(self.graph)
        result['communicability_betweenness_centrality'] = \
            nx.communicability_betweenness_centrality(self.graph)
        result['estrada_index'] = nx.estrada_index(self.graph)

    result['load_centrality'] = nx.load_centrality(self.graph)

    stringify_temp = {}
    temp = nx.edge_load(self.graph)
    for key in temp.keys():
        stringify_temp[str(key)] = temp[key]
    result['edge_load'] = stringify_temp

    result['dispersion'] = nx.dispersion(self.graph)

    fname_centra = self.DIR + '/centrality.json'
    with open(fname_centra, "w") as f:
        json.dump(result, f, cls=SetEncoder, indent=2)
    print(fname_centra)
def low_communicability_objective(edges_present: np.ndarray) -> float:
    """Accept a bitset of edges and return the sum of the communicability values."""
    net = lib.edge_set_to_network(edges_present)
    communicability = nx.communicability_exp(net.G)
    return sum(c
               for inner_values in communicability.values()
               for c in inner_values.values())
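The quantity this objective minimizes is the total communicability of the network, i.e. the sum of every entry of the matrix exponential of the adjacency matrix. A hedged sketch follows that computes it directly on an nx.Graph, bypassing the repo-specific edge-bitset decoding done by lib.edge_set_to_network; the example graph is arbitrary.

import networkx as nx
import numpy as np
from scipy.linalg import expm

def total_communicability(G: nx.Graph) -> float:
    # Sum every entry of exp(A), where A is the adjacency matrix: this equals
    # the sum over all pairwise communicability values.
    A = nx.to_numpy_array(G)
    return float(expm(A).sum())

if __name__ == '__main__':
    G = nx.erdos_renyi_graph(30, 0.1, seed=1)
    via_nx = sum(c for row in nx.communicability_exp(G).values() for c in row.values())
    print(total_communicability(G), via_nx)  # the two agree up to floating point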
def dist(self, G1, G2): """ This distance is based on the communicability matrix, $C$, of a graph consisting of elements $c_{ij}$ which are values corresponding to the numbers of shortest paths of length $k$ between nodes $i$ and $j$. See: Estrada, E., & Hatano, N. (2008). Communicability in complex networks. Physical Review E, 77(3), 036111. https://journals.aps.org/pre/abstract/10.1103/PhysRevE.77.036111 for a full introduction. The commmunicability matrix is symmetric, which means the communicability sequence is formed by flattening the upper triangular of $C$, which is then normalized to create the communicability sequence, $P$. The communicability sequence entropy distance between two graphs, $G1$ and $G2$, is the Jensen-Shannon divergence between these communicability sequence distributions, $P1$ and $P2$ of the two graphs. Note: this function uses the networkx approximation of the communicability of a graph, `nx.communicability_exp`, which requires G1 and G2 to be simple undirected networks. In addition to the final distance scalar, `self.results` stores the two vectors $P1$ and $P2$, their mixed vector, $P0$, and their associated entropies. Params ------ G1 (nx.Graph): the first graph G2 (nx.Graph): the second graph Returns ------- dist (float): between zero and one, this is the communicability sequence distance bewtween G1 and G2. """ N1 = G1.number_of_nodes() N2 = G2.number_of_nodes() C1 = nx.communicability_exp(G1) C2 = nx.communicability_exp(G2) Ca1 = np.zeros((N1, N1)) Ca2 = np.zeros((N2, N2)) for i in range(Ca1.shape[0]): Ca1[i] = np.array(list(C1[i].values())) for i in range(Ca2.shape[0]): Ca2[i] = np.array(list(C2[i].values())) lil_sigma1 = np.triu(Ca1).flatten() lil_sigma2 = np.triu(Ca2).flatten() big_sigma1 = sum(lil_sigma1[np.nonzero(lil_sigma1)[0]]) big_sigma2 = sum(lil_sigma2[np.nonzero(lil_sigma2)[0]]) P1 = lil_sigma1 / big_sigma1 P2 = lil_sigma2 / big_sigma2 P0 = (P1 + P2) / 2 H1 = sp.stats.entropy(P1) H2 = sp.stats.entropy(P2) H0 = sp.stats.entropy(P0) dist = np.sqrt(H0 - 0.5 * (H1 + H2)) self.results['P1'] = P1 self.results['P2'] = P2 self.results['P0'] = P0 self.results['entropy_1'] = H1 self.results['entropy_2'] = H2 self.results['entropy_mixture'] = H0 self.results['dist'] = dist return dist
def communicability_exp(uG, ni, nj, rand_node):
    # Communicability of node ni to a target node nj and to a random comparison node.
    c = nx.communicability_exp(uG)
    return c[ni][nj], c[ni][rand_node]
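For a quick sanity check of the wrapper above, a sketch comparing a single pair's communicability against the corresponding entry of the matrix exponential of the adjacency matrix; the graph and the node choices are arbitrary.

import networkx as nx
from scipy.linalg import expm

if __name__ == '__main__':
    uG = nx.karate_club_graph()     # nodes are labeled 0..N-1
    pair, rand_pair = communicability_exp(uG, 0, 33, 5)

    A = nx.to_numpy_array(uG)       # row/column order follows uG.nodes()
    E = expm(A)
    print(pair, E[0, 33])           # the two values agree up to floating point
    print(rand_pair, E[0, 5])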