def compute(self):
    # NOTE: bct.betweenness_wei treats matrix entries as connection
    # *lengths* (e.g. inverse weights), so self.g should already be
    # mapped from weights to lengths before this is called.
    centrality_unweighted = bct.betweenness_bin(self.binarized)
    centrality_weighted = bct.betweenness_wei(self.g)
    self.stats['Betweenness Unweighted'] = list(centrality_unweighted)
    self.stats['Betweenness Weighted'] = list(centrality_weighted)
    return self.stats
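# A minimal standalone sketch of the same computation, assuming bctpy is
# installed. The toy matrix, the 0.5 binarization cutoff, and the
# weight-to-length conversion are illustrative assumptions, not the
# repo's actual preprocessing.
import numpy as np
import bct

rng = np.random.default_rng(0)
w = rng.random((10, 10))
w = (w + w.T) / 2                    # symmetric toy connectivity matrix
np.fill_diagonal(w, 0)

binarized = (w > 0.5).astype(float)            # hypothetical cutoff
lengths = bct.weight_conversion(w, 'lengths')  # map weights to lengths

bc_bin = bct.betweenness_bin(binarized)        # node betweenness (binary)
bc_wei = bct.betweenness_wei(lengths)          # node betweenness (weighted)
print(bc_bin.shape, bc_wei.shape)              # (10,) (10,)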
def centrality(self,
               sbj_number,
               nodes_number,
               atlas,
               make_symmetric=True,
               upper_threshold=None,
               lower_threshold=None,
               binarize=False):
    '''
    Compute centrality measures of the adjacency matrix.

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int |
        number of nodes
    atlas: excel file |
        please see the example available in the repo
        (e.g. new_atlas_coords.xlsx)
    make_symmetric: Boolean |
        True indicates that the matrix is upper or lower triangular
        and needs to be symmetrized;
        False indicates that the matrix is already a full matrix
    upper_threshold: int |
        an integer value ranging from 0 to 100, interpreted as a
        percentage of the maximum value. Values below that threshold
        will be set to 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100, interpreted as a
        percentage of the maximum value. Values above that threshold
        will be set to 0 (Default is None)
    binarize: Boolean |
        True will make the connectivity matrix binary.
        Default is False

    Returns
    -------
    dict: dictionary with the following keys |

    edge_betweeness_bin: np.ndarray |
        Betweenness centrality is the fraction of all shortest paths
        in the network that pass through a given node; nodes with high
        betweenness participate in a large number of shortest paths.
        Despite the key name, this holds the *node* betweenness
        centrality vector of the binarized matrix.
    edge_betweeness_wei: np.ndarray |
        As above, computed on the weighted matrix. Holds the node
        betweenness centrality vector.
    eigenvector_centrality_und: np.ndarray |
        Eigenvector centrality is a self-referential measure of
        centrality: nodes have high eigenvector centrality if they
        connect to other nodes that have high eigenvector centrality.
        The eigenvector centrality of node i is the ith element of the
        eigenvector corresponding to the largest eigenvalue of the
        adjacency matrix.
    coreness_kcoreness_centrality_bu: np.ndarray |
        The k-core is the largest subgraph comprising nodes of degree
        at least k. The coreness of a node is k if the node belongs to
        the k-core but not to the (k+1)-core. Holds the node coreness.
    kn_kcoreness_centrality_bu: np.ndarray |
        Holds the size of each k-core.
    module_degree_zscore: np.ndarray |
        The within-module degree z-score is a within-module version of
        degree centrality. Holds the within-module degree z-score.
    participation_coef: np.ndarray |
        The participation coefficient measures the diversity of a
        node's intermodular connections. Holds the participation
        coefficient.
    subgraph_centrality: np.ndarray |
        The subgraph centrality of a node is a weighted sum of closed
        walks of different lengths in the network starting and ending
        at that node. Holds one subgraph centrality per node.
    '''
    with open(self.net_label_txt) as f:
        net = f.read().splitlines()
    self.atlas = pd.read_excel(atlas, header=None)
    self.atlas = np.array(self.atlas)
    self.ci_original = self.atlas[:, 8]  # community affiliation vector
    self.centrality = {
        "edge_betweeness_bin": np.zeros([sbj_number, nodes_number]),
        "edge_betweeness_wei": np.zeros([sbj_number, nodes_number]),
        "eigenvector_centrality_und": np.zeros([sbj_number, nodes_number]),
        "coreness_kcoreness_centrality_bu": np.zeros([sbj_number, nodes_number]),
        "kn_kcoreness_centrality_bu": np.zeros([sbj_number, nodes_number]),
        "module_degree_zscore": np.zeros([sbj_number, nodes_number]),
        "participation_coef": np.zeros([sbj_number, nodes_number]),
        "subgraph_centrality": np.zeros([sbj_number, nodes_number]),
    }
    for subj in range(len(self.matrices_files)):
        self.matrix = pd.read_csv(self.matrices_files[subj],
                                  sep=' ', header=None)
        self.matrix = np.array(self.matrix)
        if make_symmetric:
            # Mirror the triangular matrix, counting the diagonal once.
            self.matrix = (self.matrix + self.matrix.T
                           - np.diag(self.matrix.diagonal()))
        self.max = np.max(self.matrix.flatten())
        if upper_threshold is not None:
            self.matrix[self.matrix < upper_threshold * self.max / 100] = 0
        if lower_threshold is not None:
            self.matrix[self.matrix > lower_threshold * self.max / 100] = 0
        self.matrix_bin = bct.algorithms.binarize(self.matrix)
        self.matrix_weight = self.matrix
        if binarize:
            self.matrix = bct.algorithms.binarize(self.matrix)
        np.fill_diagonal(self.matrix, 0)
        np.fill_diagonal(self.matrix_bin, 0)
        np.fill_diagonal(self.matrix_weight, 0)

        self.BC = bct.betweenness_bin(self.matrix_bin)
        self.centrality['edge_betweeness_bin'][subj] = self.BC
        # NOTE: betweenness_wei treats entries as connection lengths;
        # convert weights to lengths upstream if that is the intent.
        self.BC_w = bct.betweenness_wei(self.matrix_weight)
        self.centrality['edge_betweeness_wei'][subj] = self.BC_w
        self.v = bct.eigenvector_centrality_und(self.matrix)
        self.centrality['eigenvector_centrality_und'][subj] = self.v
        self.coreness, self.kn = bct.kcoreness_centrality_bu(self.matrix_bin)
        self.centrality['coreness_kcoreness_centrality_bu'][subj] = self.coreness
        self.centrality['kn_kcoreness_centrality_bu'][subj] = self.kn
        self.Z = bct.module_degree_zscore(self.matrix, ci=self.ci_original)
        self.centrality['module_degree_zscore'][subj] = self.Z
        self.P = bct.participation_coef(self.matrix, ci=self.ci_original)
        self.centrality['participation_coef'][subj] = self.P
        self.Cs = bct.subgraph_centrality(self.matrix_bin)
        self.centrality['subgraph_centrality'][subj] = self.Cs

    return self.centrality
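# Hypothetical usage sketch: the class name `GraphMeasures`, its
# constructor arguments, and the file names below are placeholders
# standing in for whatever class owns centrality() in this repo.
from glob import glob

gm = GraphMeasures(matrices_files=sorted(glob('matrices/*.txt')),
                   net_label_txt='net_labels.txt')  # hypothetical constructor
measures = gm.centrality(sbj_number=20,
                         nodes_number=200,
                         atlas='new_atlas_coords.xlsx',
                         make_symmetric=True,
                         upper_threshold=10,  # zero edges below 10% of max
                         binarize=False)
print(measures['eigenvector_centrality_und'].shape)  # (20, 200)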
def nodal_degree_vulnerability(self,
                               sbj_number,
                               nodes_number,
                               make_symmetric=True,
                               binarize=False,
                               threshold=None,
                               recalculate=False,
                               attack_type='target',
                               metric2use='degree'):
    '''
    Performs robustness analysis based on nodal degree.

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int |
        number of nodes
    make_symmetric: Boolean |
        True indicates that the matrix is upper or lower triangular
        and needs to be symmetrized;
        False indicates that the matrix is already a full matrix
    binarize: Boolean |
        True will make the connectivity matrix binary.
        Default is False
    threshold: int |
        an integer value ranging from 0 to 100, interpreted as a
        percentage of the maximum value; values below it are set to 0.
        Default is None
    recalculate: Boolean |
        Use a sequential (recalculate=True) or simultaneous
        (recalculate=False) attack approach. Default is False
    attack_type: str |
        either 'target' or 'random'
    metric2use: str |
        nodal metric used to rank nodes for the targeted attack; one of
        'degree', 'eigenvector_centrality', 'betweenness_bin' or
        'betweenness_wei'. Default is 'degree'

    Returns
    -------
    all_vulnerability: np.ndarray |
        overall vulnerability of the network, one value per subject
    all_x: np.ndarray |
        fraction of nodes removed at each attack step
    all_y: np.ndarray |
        relative size of the largest connected component at each step
    all_largest_comp: np.ndarray |
        absolute size of the largest connected component at each step
    '''
    self.all_vulnerability = np.zeros([sbj_number])
    self.all_x = np.zeros([sbj_number, nodes_number])
    self.all_y = np.zeros([sbj_number, nodes_number])
    self.all_largest_comp = np.zeros([sbj_number, nodes_number])

    for subj in range(len(self.matrices_files)):
        self.matrix = pd.read_csv(self.matrices_files[subj],
                                  sep=' ', header=None)
        self.matrix = np.array(self.matrix)
        if make_symmetric:
            # Mirror the triangular matrix, counting the diagonal once.
            self.matrix = (self.matrix + self.matrix.T
                           - np.diag(self.matrix.diagonal()))
        if binarize:
            self.matrix = bct.algorithms.binarize(self.matrix)
        if threshold is not None:
            self.matrix[self.matrix <
                        threshold * np.max(self.matrix.flatten()) / 100] = 0
        np.fill_diagonal(self.matrix, 0)

        if attack_type == 'target':
            if metric2use == 'degree':
                self.deg = bct.algorithms.degrees_und(self.matrix)
            elif metric2use == 'eigenvector_centrality':
                self.deg = bct.eigenvector_centrality_und(self.matrix)
            elif metric2use == 'betweenness_bin':
                self.deg = bct.betweenness_bin(self.matrix)
            elif metric2use == 'betweenness_wei':
                self.deg = bct.betweenness_wei(self.matrix)
            self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
            # Rank nodes by the chosen metric, highest first.
            self.m = dict(enumerate(self.deg.flatten(), 0))
            self.l = sorted(self.m.items(), key=operator.itemgetter(1),
                            reverse=True)
            self.x = []
            self.y = []
            self.lcomp = []
            self.largest_component = max(
                networkx.connected_components(self.g), key=len)
            self.n = len(self.g.nodes())
            self.x.append(0)
            self.y.append(len(self.largest_component) * 1. / self.n)
            self.lcomp.append(len(self.largest_component))
            self.R = 0.0
            for i in range(1, self.n):
                # Remove the current highest-ranked node.
                self.g.remove_node(self.l.pop(0)[0])
                if recalculate:
                    # Sequential attack: re-rank the surviving nodes.
                    self.matrix = networkx.convert_matrix.to_numpy_array(
                        self.g)
                    self.g = networkx.convert_matrix.from_numpy_array(
                        self.matrix)
                    if metric2use == 'degree':
                        self.deg = bct.algorithms.degrees_und(self.matrix)
                    elif metric2use == 'eigenvector_centrality':
                        self.deg = bct.eigenvector_centrality_und(self.matrix)
                    elif metric2use == 'betweenness_bin':
                        self.deg = bct.betweenness_bin(self.matrix)
                    elif metric2use == 'betweenness_wei':
                        self.deg = bct.betweenness_wei(self.matrix)
                    self.m = dict(enumerate(self.deg.flatten(), 0))
                    self.l = sorted(self.m.items(),
                                    key=operator.itemgetter(1),
                                    reverse=True)
                self.largest_component = max(
                    networkx.connected_components(self.g), key=len)
                self.x.append(i * 1. / self.n)
                self.R += len(self.largest_component) * 1. / self.n
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))

        elif attack_type == 'random':
            self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
            # Random attack: shuffle the node order instead of ranking.
            self.l = [(node, 0) for node in self.g.nodes()]
            random.shuffle(self.l)
            self.x = []
            self.y = []
            self.lcomp = []
            self.largest_component = max(
                networkx.connected_components(self.g), key=len)
            self.n = len(self.g.nodes())
            self.x.append(0)
            self.y.append(len(self.largest_component) * 1. / self.n)
            self.lcomp.append(len(self.largest_component))
            self.R = 0.0
            for i in range(1, self.n):
                self.g.remove_node(self.l.pop(0)[0])
                self.largest_component = max(
                    networkx.connected_components(self.g), key=len)
                self.x.append(i * 1. / self.n)
                self.R += len(self.largest_component) * 1. / self.n
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))

        self.all_x[subj] = np.array(self.x)
        self.all_y[subj] = np.array(self.y)
        # Vulnerability as the gap from 0.5, the robustness value of a
        # maximally robust network.
        self.all_vulnerability[subj] = np.array(0.5 - self.R / self.n)
        self.all_largest_comp[subj] = np.array(self.lcomp)

    return (self.all_vulnerability, self.all_x, self.all_y,
            self.all_largest_comp)
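# Hypothetical usage sketch for the robustness analysis, reusing the
# placeholder object `gm` from the sketch above; the plotting is
# illustrative, not part of the repo.
import matplotlib.pyplot as plt

vuln, x, y, lcomp = gm.nodal_degree_vulnerability(sbj_number=20,
                                                  nodes_number=200,
                                                  attack_type='target',
                                                  metric2use='degree',
                                                  recalculate=True)
plt.plot(x[0], y[0])  # attack curve for the first subject
plt.xlabel('fraction of nodes removed')
plt.ylabel('relative size of largest component')
plt.show()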
)  # closes a call that is truncated in this excerpt
ctrl_c, ctrl_e = grab_corr(
    subjects,
    nodes=None,
    task=task,
    condition=cond1,
    session="1",
    atlas="shen2015",
)
p, adj, fig = nbs_and_graphs(
    ctrl_c, cond_c, p_thresh=0.05, k=1000, atlas=shen_nii, verbose=False
)
# Node betweenness centrality of the NBS-derived adjacency matrix;
# the Shen 2015 atlas has 268 nodes, indexed 1-268.
centrality = bct.betweenness_bin(adj)
centrality_df = pd.Series(
    centrality, index=np.arange(1, 269), name="betweenness centrality"
)
centrality_df.to_csv(
    join(
        data_dir,
        "nbs",
        "{0}_students-{1}-centrality.csv".format(group, task),
    ),
    header=False,
)
adjacency = pd.DataFrame(
    adj, columns=np.arange(1, 269), index=np.arange(1, 269)
)