def weighted_signal_flow(A):
    """Compute the weighted signal-flow metric of Varshney et al. 2011.

    Assigns each node a scalar "signal flow" value such that nodes sending
    more weight than they receive sit higher than nodes that mostly receive.
    The values solve the linear system ``L z = b`` (via pseudoinverse, since
    the Laplacian ``L`` is singular) on the symmetrized graph.

    Parameters
    ----------
    A : np.ndarray
        Square weighted adjacency matrix of a directed graph; ``A[i, j]`` is
        the weight of the edge from node ``i`` to node ``j``. The input is
        copied and not modified.

    Returns
    -------
    z : np.ndarray
        1-D array of signal-flow values, one per node.
    """
    A = A.copy()
    A = remove_loops(A)  # self-loops carry no flow information
    W = (A + A.T) / 2  # symmetrized weight matrix
    D = np.diag(np.sum(W, axis=1))  # (weighted) degree matrix
    L = D - W  # graph Laplacian of the symmetrized network
    # Net directed imbalance at each node, weighted by edge strength
    b = np.sum(W * (A - A.T), axis=1)
    # L is singular (constant vector in null space), so use the pseudoinverse
    L_pinv = np.linalg.pinv(L)
    z = L_pinv @ b
    return z
# Set up a current-flow ("voltage") experiment on the graph Laplacian:
# current is injected at one source class and drained across the out classes.
class_key = "merge_class"
# Flatten the grouped source/output class lists into single lists of labels
sg = list(chain.from_iterable(source_groups))
og = list(chain.from_iterable(out_groups))
sg_name = "All"
og_name = "All"
meta = mg.meta.copy()
class_key = "merge_class"  # NOTE(review): re-assigned; same value as above
# Positional index column so label-based selections map back to adjacency rows
meta["idx"] = range(len(meta))
A = mg.adj
A = A.copy()
A = remove_loops(A)  # drop self-loops before building the Laplacian
W = (A + A.T) / 2  # symmetrized weights
D = np.diag(np.sum(W, axis=1))  # weighted degree matrix
L = D - W  # graph Laplacian
# L is singular, so use the pseudoinverse to solve for node voltages later
L_pinv = np.linalg.pinv(L)
# Accumulators for per-node voltages (and their ranks) across experiments
voltage_df = pd.DataFrame(index=meta.index)
rank_voltage_df = pd.DataFrame(index=meta.index)
# col_meta = pd.DataFrame(index=range(len(source_groups) * len(sink_groups)))
# col_meta["in_out"] = -1
# Rows where current enters (first source label only) and exits (all out labels)
from_inds = meta[meta[class_key].isin((sg[0],))]["idx"].values
out_inds = meta[meta[class_key].isin(og)]["idx"].values
# Unit total current in at the sources, unit total current out at the sinks
current = np.zeros((len(A), 1))
current[from_inds] = 1 / len(from_inds)
current[out_inds] = -1 / len(out_inds)
# %% [markdown] # ## rows = [] class DDCSBMEstimator(DCSBMEstimator): def __init__(self, **kwargs): super().__init__(degree_directed=True, **kwargs) for l in range(n_levels + 1): labels = meta[f"lvl{l}_labels"].values left_adj = binarize(adj[np.ix_(lp_inds, lp_inds)]) left_adj = remove_loops(left_adj) right_adj = binarize(adj[np.ix_(rp_inds, rp_inds)]) right_adj = remove_loops(right_adj) for model, name in zip( [DDCSBMEstimator, DCSBMEstimator, SBMEstimator], ["DDCSBM", "DCSBM", "SBM"] ): # train on left estimator = model(directed=True, loops=False) uni_labels, inv = np.unique(labels, return_inverse=True) estimator.fit(left_adj, inv[lp_inds]) train_left_p = estimator.p_mat_ train_left_p[train_left_p == 0] = 1 / train_left_p.size n_params = estimator._n_parameters() + len(uni_labels) # test on left
def calc_model_liks(adj, meta, lp_inds, rp_inds, n_levels=10):
    """Score DCSBM/SBM fits on each hemisphere against both hemispheres.

    For every level of the hierarchical clustering (``lvl{l}_labels`` columns
    of ``meta``), fits each model on the left and on the right hemisphere
    subgraph, then evaluates the Poisson log-likelihood of both the same-side
    and opposite-side subgraphs under the trained edge-probability matrix.

    Parameters
    ----------
    adj : np.ndarray
        Full adjacency matrix; hemisphere subgraphs are extracted, binarized,
        and have their self-loops removed before fitting.
    meta : pd.DataFrame
        Node metadata with ``lvl0_labels`` ... ``lvl{n_levels}_labels`` columns.
    lp_inds, rp_inds : array-like
        Row/column indices of the left and right hemisphere nodes in ``adj``.
    n_levels : int, optional
        Deepest label level to evaluate (inclusive), by default 10.

    Returns
    -------
    pd.DataFrame
        One row per (level, model, train side, test side) with columns
        ``train_side``, ``test`` ("Same"/"Opposite"), ``test_side``,
        ``score``, ``level``, ``model``, ``n_params``, ``norm_score``
        (score normalized by the test graph's edge count).
    """
    rows = []
    for l in range(n_levels + 1):
        labels = meta[f"lvl{l}_labels"].values
        left_adj = binarize(adj[np.ix_(lp_inds, lp_inds)])
        left_adj = remove_loops(left_adj)
        right_adj = binarize(adj[np.ix_(rp_inds, rp_inds)])
        right_adj = remove_loops(right_adj)
        for model, name in zip([DCSBMEstimator, SBMEstimator], ["DCSBM", "SBM"]):
            uni_labels, inv = np.unique(labels, return_inverse=True)
            # Train once per side, then test the fitted p-matrix on both sides.
            for train_side, train_adj, train_inds in [
                ("Left", left_adj, lp_inds),
                ("Right", right_adj, rp_inds),
            ]:
                estimator = model(directed=True, loops=False)
                estimator.fit(train_adj, inv[train_inds])
                train_p = estimator.p_mat_
                # Floor zero probabilities so logpmf never sees log(0)
                train_p[train_p == 0] = 1 / train_p.size
                # Model parameters plus one parameter per block label
                n_params = estimator._n_parameters() + len(uni_labels)
                for test_side, test_adj in [
                    ("Left", left_adj),
                    ("Right", right_adj),
                ]:
                    score = poisson.logpmf(test_adj, train_p).sum()
                    rows.append(
                        dict(
                            train_side=train_side,
                            test="Same" if test_side == train_side else "Opposite",
                            test_side=test_side,
                            score=score,
                            level=l,
                            model=name,
                            n_params=n_params,
                            norm_score=score / test_adj.sum(),
                        )
                    )
    return pd.DataFrame(rows)
def motif_matching(
    paths,
    ID,
    atlas,
    namer_dir,
    name_list,
    metadata_list,
    multigraph_list_all,
    graph_path_list_all,
    rsn=None,
):
    """Align a structural and a functional connectome and threshold by motifs.

    Loads the two graphs from ``paths`` (structural first, functional second),
    drops nodes not present in both modalities, keeps only edges present in
    both graphs and within matched communities, then runs motif-based adaptive
    thresholding (``compare_motifs``) and saves the thresholded arrays.

    The list arguments (``name_list``, ``metadata_list``,
    ``multigraph_list_all``, ``graph_path_list_all``) are mutated in place
    (appended to) and also returned; on motif-comparison failure four empty
    lists are returned instead.
    """
    import networkx as nx
    import numpy as np
    import glob
    import pickle
    from pynets.core import thresholding
    from pynets.stats.netmotifs import compare_motifs
    from sklearn.metrics.pairwise import cosine_similarity
    from pynets.stats.netstats import community_resolution_selection
    from graspy.utils import remove_loops, symmetrize, get_lcc
    from pynets.core.nodemaker import get_brainnetome_node_attributes

    [struct_graph_path, func_graph_path] = paths
    struct_mat = np.load(struct_graph_path)
    func_mat = np.load(func_graph_path)

    # Node coordinates/labels/intensities come from sibling JSON sidecar files
    [struct_coords, struct_labels, struct_label_intensities] = \
        get_brainnetome_node_attributes(glob.glob(
            f"{str(Path(struct_graph_path).parent.parent)}/nodes/*.json"),
            struct_mat.shape[0])

    [func_coords, func_labels, func_label_intensities] = \
        get_brainnetome_node_attributes(glob.glob(
            f"{str(Path(func_graph_path).parent.parent)}/nodes/*.json"),
            func_mat.shape[0])

    # Find intersecting nodes across modalities (i.e. assuming the same
    # parcellation, but accomodating for the possibility of dropped nodes)
    diff1 = list(set(struct_label_intensities) - set(func_label_intensities))
    diff2 = list(set(func_label_intensities) - set(struct_label_intensities))
    G_struct = nx.from_numpy_array(struct_mat)
    G_func = nx.from_numpy_array(func_mat)

    # Remove structural-only nodes (descending order keeps indices valid)
    bad_idxs = []
    for val in diff1:
        bad_idxs.append(struct_label_intensities.index(val))
    bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
    if type(struct_coords) is np.ndarray:
        struct_coords = list(tuple(x) for x in struct_coords)
    for j in bad_idxs:
        G_struct.remove_node(j)
        print(f"Removing: {(struct_labels[j], struct_coords[j])}...")
        del struct_labels[j], struct_coords[j]

    # Remove functional-only nodes the same way
    bad_idxs = []
    for val in diff2:
        bad_idxs.append(func_label_intensities.index(val))
    bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
    if type(func_coords) is np.ndarray:
        func_coords = list(tuple(x) for x in func_coords)
    for j in bad_idxs:
        G_func.remove_node(j)
        print(f"Removing: {(func_labels[j], func_coords[j])}...")
        del func_labels[j], func_coords[j]

    struct_mat = nx.to_numpy_array(G_struct)
    func_mat = nx.to_numpy_array(G_func)

    # Clean both matrices: no self-loops, symmetric, auto-fixed thresholds
    struct_mat = thresholding.autofix(symmetrize(remove_loops(struct_mat)))
    func_mat = thresholding.autofix(symmetrize(remove_loops(func_mat)))

    if func_mat.shape == struct_mat.shape:
        # Keep only edges present in BOTH modalities
        func_mat[~struct_mat.astype("bool")] = 0
        struct_mat[~func_mat.astype("bool")] = 0
        print(
            "Edge disagreements after matching: ",
            sum(sum(abs(func_mat - struct_mat))),
        )

        metadata = {}
        assert (
            len(struct_coords)
            == len(struct_labels)
            == len(func_coords)
            == len(func_labels)
            == func_mat.shape[0]
        )
        metadata["coords"] = struct_coords
        metadata["labels"] = struct_labels
        metadata_list.append(metadata)

        # Re-symmetrize (max of each edge direction) and standardize weights
        struct_mat = np.maximum(struct_mat, struct_mat.T)
        func_mat = np.maximum(func_mat, func_mat.T)
        struct_mat = thresholding.standardize(struct_mat)
        func_mat = thresholding.standardize(func_mat)

        # Community affiliation vectors for each modality (index [1] of the
        # community_resolution_selection return tuple)
        struct_node_comm_aff_mat = community_resolution_selection(
            nx.from_numpy_matrix(np.abs(struct_mat))
        )[1]
        func_node_comm_aff_mat = community_resolution_selection(
            nx.from_numpy_matrix(np.abs(func_mat))
        )[1]

        # One boolean membership vector per community, per modality
        struct_comms = []
        for i in np.unique(struct_node_comm_aff_mat):
            struct_comms.append(struct_node_comm_aff_mat == i)
        func_comms = []
        for i in np.unique(func_node_comm_aff_mat):
            func_comms.append(func_node_comm_aff_mat == i)

        # Pick the most similar community pairing across modalities
        sims = cosine_similarity(struct_comms, func_comms)
        try:
            struct_comm = struct_comms[np.argmax(sims, axis=0)[0]]
        except BaseException:
            print('Matching by structural communities failed...')
            struct_comm = struct_mat
        try:
            func_comm = func_comms[np.argmax(sims, axis=0)[0]]
        except BaseException:
            print('Matching by functional communities failed...')
            func_comm = func_mat

        # Mask of node pairs whose community membership agrees across modalities
        comm_mask = np.equal.outer(struct_comm, func_comm).astype(bool)

        try:
            assert comm_mask.shape == struct_mat.shape == func_mat.shape
        except AssertionError as e:
            # Attach the offending arrays/shapes to the error for debugging
            e.args += (comm_mask, comm_mask.shape, struct_mat,
                       struct_mat.shape, func_mat, func_mat.shape)

        # Zero out edges outside the agreed communities (best-effort)
        try:
            struct_mat[~comm_mask] = 0
        except BaseException:
            print('Skipping community masking...')
        try:
            func_mat[~comm_mask] = 0
        except BaseException:
            print('Skipping community masking...')

        struct_name = struct_graph_path.split("/rawgraph_"
                                              )[-1].split(".npy")[0]
        func_name = func_graph_path.split("/rawgraph_")[-1].split(".npy")[0]
        name = f"sub-{ID}_{atlas}_mplx_Layer-1_{struct_name}_" \
               f"Layer-2_{func_name}"
        name_list.append(name)

        struct_mat = np.maximum(struct_mat, struct_mat.T)
        func_mat = np.maximum(func_mat, func_mat.T)

        # Adaptive thresholding by motif comparison; bail out entirely on failure
        try:
            [mldict, g_dict] = compare_motifs(
                struct_mat, func_mat, name, namer_dir)
        except BaseException:
            print(f"Adaptive thresholding by motif comparisons failed "
                  f"for {name}. This usually happens when no motifs are found")
            return [], [], [], []

        multigraph_list_all.append(list(mldict.values())[0])

        # Save the thresholded struct/func pair for every motif threshold
        graph_path_list = []
        for thr in list(g_dict.keys()):
            multigraph_path_list_dict = {}
            [struct, func] = g_dict[thr]
            # NOTE(review): both output names start with "struct_" — the
            # functional file is not prefixed "func_"; confirm intended.
            struct_out = f"{namer_dir}/struct_{atlas}_{struct_name}.npy"
            func_out = f"{namer_dir}/struct_{atlas}_{func_name}_" \
                       f"motif-{thr}.npy"
            np.save(struct_out, struct)
            np.save(func_out, func)
            multigraph_path_list_dict[f"struct_{atlas}_{thr}"] = struct_out
            multigraph_path_list_dict[f"func_{atlas}_{thr}"] = func_out
            graph_path_list.append(multigraph_path_list_dict)
        graph_path_list_all.append(graph_path_list)
    else:
        print(
            f"Skipping {rsn} rsn, since structural and functional graphs are "
            f"not identical shapes."
        )

    return name_list, metadata_list, multigraph_list_all, graph_path_list_all
# Reduce the graph to its largest connected component, then binarize and plot.
print("Checking if graph is fully connected")
print(is_fully_connected(Gn))
Gn, inds = get_lcc(Gn, return_inds=True)
num_removed = n_verts_original - len(Gn)
print(f"Removed {num_removed} node")

# select metadata
classes = meta_to_array(Gn, "Class")
simple_classes = to_simple_class(classes)
names = meta_to_array(Gn, "Name")
ids = meta_to_array(Gn, "ID")

# load adjacency and preprocess
Gn_adj = import_graph(Gn)
Gn_adj = remove_loops(Gn_adj)  # self-loops excluded from analysis
# Gn = pass_to_ranks(Gn)
Gn_adj = binarize(Gn_adj)  # unweighted analysis from here on

# plot graph
heatmap(
    Gn_adj,
    inner_hier_labels=simple_classes,
    sort_nodes=True,
    figsize=(15, 15),
    title=use_graph,
)

# compute and plot directed normalized laplacian
# (regularized DAD form; call continues beyond this chunk)
L = to_laplace(Gn_adj, form="R-DAD", regularizer=1)
heatmap(