def plot_rdm(rdm, mat=0, cmap=None):
    '''function to visualize an RDM based on rank transformed and scaled
    similarity values (only for plotting, raw/initial values remain unchanged)'''

    from scipy.io.matlab import loadmat
    from scipy.stats import rankdata
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix
    from sklearn import preprocessing
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt

    if mat == 1:
        # read the RDM from a MATLAB .mat file
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if cmap is None:
        cmap = 'Spectral_r'

    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)
    rdm = rdm.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0

    # rank transform the vectorized RDM, then scale the ranks to [0, 1]
    rdm_vec = sym_matrix_to_vec(rdm)
    rdm_vec = rankdata(rdm_vec)

    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1), copy=True)
    rdm_array = rdm_vec.reshape(-1, 1)  # one feature column; reshape(-1, 2) would scale the two halves separately
    rdm_array = min_max_scaler.fit_transform(rdm_array)
    rdm_array = rdm_array.flatten()
    rdm_rank_scale = vec_to_sym_matrix(rdm_array)

    ax = sns.heatmap(rdm_rank_scale, xticklabels=categories,
                     yticklabels=categories, cmap=cmap)
    ax.set_yticklabels(categories, rotation=0)
    ax.xaxis.tick_top()
    ax.set_xticklabels(categories, rotation=90)
    ax.collections[0].colorbar.set_label(
        "pairwise similarities (iMDS), rank transformed & scaled [0,1]")
    plt.tight_layout()
def plot_rdm(rdm, mat=False, cmap="Spectral_r"):
    '''function to visualize an RDM based on rank transformed and scaled
    similarity values (only for plotting, raw/initial values remain unchanged)'''

    from scipy.io.matlab import loadmat
    from scipy.stats import rankdata
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import minmax_scale
    import pandas as pd
    import seaborn as sns
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix

    if mat:
        # read the RDM from a MATLAB .mat file
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)
    rdm = rdm.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0

    # rank transform the vectorized RDM, then scale the ranks to [0, 1]
    rdm_vec = sym_matrix_to_vec(rdm)
    rdm_vec = rankdata(rdm_vec)
    rdm_array = rdm_vec.reshape(-1, 1)
    rdm_array = minmax_scale(rdm_array, feature_range=(0, 1))
    rdm_array = rdm_array.flatten()
    rdm_rank_scale = vec_to_sym_matrix(rdm_array)

    y_categories = list(reversed(categories))
    ax = sns.heatmap(rdm_rank_scale, xticklabels=categories,
                     yticklabels=y_categories, cmap=cmap)
    ax.set_yticklabels(y_categories, rotation=0)
    ax.xaxis.tick_top()
    ax.set_xticklabels(categories, rotation=90)
    ax.collections[0].colorbar.set_label(
        "pairwise similarities, rank transformed & scaled [0,1]")
    plt.tight_layout()
def netmat(timeseries_list, measure="partial correlation"):
    # measure can be: "correlation", "partial correlation", "tangent"
    # This takes the timeseries of all subjects, as needed for the "tangent" measure
    import pandas as pd
    import numpy as np
    import os
    from nilearn.connectome import ConnectivityMeasure, vec_to_sym_matrix

    pooled_subjects = []
    for subj in timeseries_list:
        ts = pd.read_csv(subj, sep="\t")
        pooled_subjects.append(ts.values)

    conn_measure = ConnectivityMeasure(kind=measure, vectorize=True,
                                       discard_diagonal=True)
    correlation_matrix = conn_measure.fit_transform(pooled_subjects)

    regnum = ts.shape[1]

    subject_matrix_list = []
    for i in range(correlation_matrix.shape[0]):
        mat = pd.DataFrame(
            vec_to_sym_matrix(correlation_matrix[i, :],
                              diagonal=np.repeat(0, regnum)))
        mat.columns = ts.columns
        mat.index = mat.columns
        # mimic mapnode behavior!!
        directory = "mapflow/_" + measure.replace(' ', '_') + "_matrix" + str(i)
        if not os.path.exists(directory):
            os.makedirs(directory)
        mat.to_csv(directory + "/" + "mtx.tsv", sep="\t")
        subject_matrix_list.append(
            str(os.path.join(os.getcwd(), directory, "mtx.tsv")))

    mean = pd.DataFrame(conn_measure.mean_)
    mean.values[range(regnum), range(regnum)] = 0  # zero out the diagonal
    mean.columns = ts.columns
    mean.index = mean.columns
    outfile = measure.replace(' ', '_') + "_mean_mtx.tsv"
    mean.to_csv(outfile, sep="\t")

    return os.path.join(os.getcwd(), outfile), subject_matrix_list
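# A minimal usage sketch for netmat(); the .tsv paths below are hypothetical
# stand-ins for per-subject regional timeseries files (tab-separated, one
# column per region, with a header row of region names).
timeseries_files = ["sub-01_ts.tsv", "sub-02_ts.tsv", "sub-03_ts.tsv"]
mean_mtx, subject_mtxs = netmat(timeseries_files, measure="tangent")
print(mean_mtx)      # path to the group-mean connectivity matrix
print(subject_mtxs)  # one matrix file per subject, written under mapflow/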
    # svr.fit(X_scaled, Y)
    return svr

mod = model(k=opt_k)
fit = mod.fit(X, Y)
print(fit.predict(X))
print(mod.named_steps['elastic'].coef_)

# map the elastic net coefficients back onto the full feature space
featuremask = mod.named_steps['fsel'].get_support()
RES = np.zeros(X[1].shape)
RES_bin = np.zeros(X[1].shape)
RES[featuremask] = mod.named_steps['elastic'].coef_
RES_bin[featuremask] = np.ones(len(mod.named_steps['elastic'].coef_))

m = vec_to_sym_matrix(RES, diagonal=np.repeat(0, len(mist_64_labels)))
m_sign = np.sign(m)  # m / abs(m) would divide by zero for empty edges
m_bin = vec_to_sym_matrix(RES_bin, diagonal=np.repeat(0, len(mist_64_labels)))

plotting.plot_matrix(m_sign, figure=(15, 15), labels=mist_64_labels['label'],
                     title="", grid=True)
plotting.show()

from matplotlib import colors as mcolors
plotting.plot_connectome(m, mist_64_labels[['x', 'y', 'z']],
                         display_mode='lzry', colorbar=True)
# prepare bootstrap sample
boot = resample(indices, replace=True, n_samples=N_obs)

# out-of-bag observations
oob = [x for x in indices if x not in boot]

# refit the model on the bootstrap sample and store its coefficients
bootfit = m_boot.fit(X[boot], y[boot])
RES[boot_i, featuremask] = bootfit.named_steps['model'].coef_

RES_MAT = vec_to_sym_matrix(RES[boot_i], diagonal=np.repeat(0, len(labels)))
# plot.plot_matrix(RES_MAT, labels['labels'].values, labels['modules'].values,
#                  outfile=global_vars._PLOT_PRED_MATRIX_)

# indices of predictive connections (upper triangle, diagonal excluded)
idx = np.transpose(np.nonzero(np.triu(RES_MAT, k=1)))

df = pd.DataFrame(RES_MAT, columns=labels['labels'].values,
                  index=labels['labels'].values)

cils = np.zeros(len(original_coefs))  # lower confidence limits
cihs = np.zeros(len(original_coefs))  # upper confidence limits
occs = np.zeros(len(original_coefs))  # occurrence counts across bootstraps
ps = np.zeros(len(original_coefs))    # bootstrap p-values

for coef_i in range(len(original_coefs)):
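    # The body of this loop is truncated in the snippet above. What follows is
    # only a plausible percentile-bootstrap sketch, not the author's original
    # code; it assumes RES holds one row of coefficients per bootstrap draw,
    # with columns aligned to original_coefs.
    draws = RES[:, coef_i]
    cils[coef_i] = np.percentile(draws, 2.5)   # lower 95% CI bound
    cihs[coef_i] = np.percentile(draws, 97.5)  # upper 95% CI bound
    occs[coef_i] = np.mean(draws != 0)         # fraction of draws selecting this edge
    # two-sided bootstrap p-value: how often the draws cross zero
    ps[coef_i] = 2 * min(np.mean(draws <= 0), np.mean(draws >= 0))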
def plot_rdm(rdm, mat=False, model=False, level=None, comp=None, cmap="Spectral_r"):
    '''function to visualize an RDM based on rank transformed and scaled
    similarity values (only for plotting, raw/initial values remain unchanged)'''

    from scipy.io.matlab import loadmat
    from scipy.stats import rankdata
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import minmax_scale
    import pandas as pd
    import seaborn as sns
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix

    if mat:
        # read the RDM from a MATLAB .mat file
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)
    y_categories = list(categories)

    if model is False and level == '2nd':
        # 2nd-level RDM: correlations between RDMs, plotted as-is
        ax = sns.heatmap(rdm, xticklabels=categories, yticklabels=y_categories,
                         cmap=cmap, vmin=-1, vmax=1)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        if comp is None:
            ax.collections[0].colorbar.set_label("correlations between RDMs")
        elif comp == 'kendalltaua':
            ax.collections[0].colorbar.set_label("correlations between RDMs [kendall tau a]")
        elif comp == 'spearman':
            ax.collections[0].colorbar.set_label("correlations between RDMs [spearman]")
        elif comp == 'pearson':
            ax.collections[0].colorbar.set_label("correlations between RDMs [pearson]")
        plt.tight_layout()

    if model is False and level is None:
        # participant RDM: rank transform, then scale to [0, 1]
        rdm = rdm.to_numpy()
        rdm_vec = sym_matrix_to_vec(rdm)
        rdm_vec = rankdata(rdm_vec)
        rdm_array = rdm_vec.reshape(-1, 1)
        rdm_array = minmax_scale(rdm_array, feature_range=(0, 1))
        rdm_array = rdm_array.flatten()
        rdm_rank_scale = vec_to_sym_matrix(rdm_array)
        ax = sns.heatmap(rdm_rank_scale, xticklabels=categories,
                         yticklabels=y_categories, cmap=cmap)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        ax.collections[0].colorbar.set_label(
            "pairwise similarities, rank transformed & scaled [0,1]")
        plt.tight_layout()

    if model is True:
        # model RDM: scale to [0, 1] without rank transformation
        rdm = rdm.to_numpy()
        rdm_vec = sym_matrix_to_vec(rdm)
        rdm_array = rdm_vec.reshape(-1, 1)
        rdm_array = minmax_scale(rdm_array, feature_range=(0, 1))
        rdm_array = rdm_array.flatten()
        rdm_scale = vec_to_sym_matrix(rdm_array)
        ax = sns.heatmap(rdm_scale, xticklabels=categories,
                         yticklabels=y_categories, cmap=cmap)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        ax.collections[0].colorbar.set_label("pairwise similarities, scaled [0,1]")
        plt.tight_layout()
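# Hypothetical usage of plot_rdm(); the CSV file names are illustrative
# placeholders for condition-by-condition RDMs with condition names as header.
plot_rdm('sub-01_rdm.csv')                          # participant RDM, rank transformed & scaled
plot_rdm('model_rdm.csv', model=True)               # model RDM, scaled but not rank transformed
plot_rdm(rdms_dist, level='2nd', comp='spearman')   # 2nd-level RDM-to-RDM correlations, e.g. from rdm_compare below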
def rdm_compare(rdms, models, comp=None, plot=None):
    '''function to compare target and model RDMs'''

    global dict_rdms
    global DefaultListOrderedDict

    import pandas as pd
    from collections import OrderedDict
    from scipy.spatial import distance
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix
    from scipy.stats import rankdata, spearmanr, kendalltau, pearsonr, mstats
    import numpy as np
    from itertools import combinations
    import pickle
    import seaborn as sns
    import matplotlib.pyplot as plt
    import copy

    # defined here so that pickled DefaultListOrderedDict objects can be loaded
    class DefaultListOrderedDict(OrderedDict):
        def __missing__(self, k):
            self[k] = []
            return self[k]

    if isinstance(rdms, str):
        with open(rdms, 'rb') as f:
            dict_rdms = pickle.load(f)
        target_rdms = copy.deepcopy(dict_rdms['rdm'])
        target_conds = target_rdms[0].keys()
    else:
        target_rdms = rdms
        target_conds = rdms[0].keys()

    if isinstance(models, str):
        with open(models, 'rb') as f:
            dict_models = pickle.load(f)
        models = dict_models['rdm']
        model_ids = dict_models['id']

    for rdm in models:
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    for index, rdm in enumerate(target_rdms):
        target_rdms[index] = target_rdms[index].to_numpy()  # .as_matrix() was removed in pandas 1.0

    global rdm_avg

    list_cor_rdm = list(range(0, len(target_rdms)))
    list_p = list(range(0, len(target_rdms)))
    target_rdms_trans = list(range(0, len(target_rdms)))

    if comp is None:
        print('rdm values will not be transformed')
        rdm_avg = pd.DataFrame(np.mean(target_rdms, axis=0), columns=target_conds)
        for index, part_rdm in enumerate(target_rdms):
            list_cor_rdm[index], list_p[index] = kendalltau(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())
        # noise ceiling: correlate each participant with the average of the others
        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()
        for index, part in enumerate(target_rdms):
            tmp_rdms = target_rdms.copy()
            tmp_part = target_rdms[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(kendalltau(tmp_part.flatten(),
                                           tmp_rdm_avg.flatten()))
        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor.correlation)
            list_p_sub.append(cor.pvalue)

    elif comp == 'spearman':
        # rank transform the RDMs before averaging and correlating
        for index, rdm in enumerate(target_rdms):
            target_rdms_trans[index] = vec_to_sym_matrix(
                rankdata(sym_matrix_to_vec(rdm)))
        rdm_avg = pd.DataFrame(np.mean(target_rdms_trans, axis=0),
                               columns=target_conds)
        for index, part_rdm in enumerate(target_rdms_trans):
            list_cor_rdm[index], list_p[index] = spearmanr(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())
        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()
        for index, part in enumerate(target_rdms_trans):
            tmp_rdms = target_rdms_trans.copy()
            tmp_part = target_rdms_trans[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(spearmanr(tmp_part.flatten(),
                                          tmp_rdm_avg.flatten()))
        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor.correlation)
            list_p_sub.append(cor.pvalue)

    elif comp == 'pearson':
        # z-score the RDMs before averaging and correlating
        for index, rdm in enumerate(target_rdms):
            target_rdms_trans[index] = vec_to_sym_matrix(
                mstats.zscore(sym_matrix_to_vec(rdm)))
        rdm_avg = pd.DataFrame(np.mean(target_rdms_trans, axis=0),
                               columns=target_conds)
        for index, part_rdm in enumerate(target_rdms_trans):
            list_cor_rdm[index], list_p[index] = pearsonr(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())
        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()
        for index, part in enumerate(target_rdms_trans):
            tmp_rdms = target_rdms_trans.copy()
            tmp_part = target_rdms_trans[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(pearsonr(tmp_part.flatten(),
                                         tmp_rdm_avg.flatten()))
        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor[0])
            list_p_sub.append(cor[1])

    upper_noise_ceiling = np.mean(list_cor_rdm)
    lower_noise_ceiling = np.mean(list_cor_rdm_sub)

    model_comp = pd.DataFrame(columns=['participant', 'models', 'cor'],
                              index=np.arange(len(dict_models['id']) * len(dict_rdms['id'])))
    model_comp['participant'] = dict_rdms['id'] * len(dict_models['id'])
    model_comp['models'] = sorted(dict_models['id'] * len(dict_rdms['id']))

    list_cor_models = list()

    snd_rdms = list()
    snd_rdms.append(rdm_avg.to_numpy())
    for mod_rdm in models:
        snd_rdms.append(mod_rdm.to_numpy())

    ids_rdms = list()
    ids_rdms.append('group average')
    for mod_ids in model_ids:
        ids_rdms.append(mod_ids)

    if comp is None:
        for index, model_rdm in enumerate(models):
            for i, sub_rdm in enumerate(target_rdms):
                list_cor_models.append(
                    kendalltau(sub_rdm.flatten(),
                               model_rdm.to_numpy().flatten()).correlation)
        rdms_dist = [kendalltau(x.flatten(), y.flatten()).correlation
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids_rdms)
    elif comp == 'spearman':
        for index, model_rdm in enumerate(models):
            for i, sub_rdm in enumerate(target_rdms_trans):
                list_cor_models.append(
                    spearmanr(sub_rdm.flatten(),
                              model_rdm.to_numpy().flatten()).correlation)
        rdms_dist = [spearmanr(x.flatten(), y.flatten()).correlation
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids_rdms)
    elif comp == 'pearson':
        for index, model_rdm in enumerate(models):
            for i, sub_rdm in enumerate(target_rdms_trans):
                list_cor_models.append(
                    pearsonr(sub_rdm.flatten(),
                             model_rdm.to_numpy().flatten())[0])
        rdms_dist = [pearsonr(x.flatten(), y.flatten())[0]
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids_rdms)

    model_comp['cor'] = list_cor_models

    if plot is None:
        print('results will not be plotted')
    elif plot == 'bar':
        ax = sns.barplot(x=model_comp['models'], y=model_comp['cor'], data=model_comp)
        # shade the region between the lower and upper noise ceiling
        plt.plot(np.linspace(-20, 120, 1000), [upper_noise_ceiling] * 1000, 'r', alpha=0.1)
        plt.plot(np.linspace(-20, 120, 1000), [lower_noise_ceiling] * 1000, 'r', alpha=0.1)
        rect = plt.Rectangle((-20, lower_noise_ceiling), 10000,
                             (upper_noise_ceiling - lower_noise_ceiling),
                             color='r', alpha=0.5)
        ax.set_xticklabels(labels=list(dict_models['id']))
        if comp is None:
            ax.set(ylabel='kendall tau a correlation with target RDM')
        elif comp == 'spearman':
            ax.set(ylabel='spearman correlation with target RDM')
        elif comp == 'pearson':
            ax.set(ylabel='pearson correlation with target RDM')
        ax.add_patch(rect)
        plt.tight_layout()
    elif plot == 'violin':
        ax = sns.violinplot(x=model_comp['models'], y=model_comp['cor'], data=model_comp)
        # shade the region between the lower and upper noise ceiling
        plt.plot(np.linspace(-20, 120, 1000), [upper_noise_ceiling] * 1000, 'r', alpha=0.1)
        plt.plot(np.linspace(-20, 120, 1000), [lower_noise_ceiling] * 1000, 'r', alpha=0.1)
        rect = plt.Rectangle((-20, lower_noise_ceiling), 10000,
                             (upper_noise_ceiling - lower_noise_ceiling),
                             color='r', alpha=0.5)
        ax.set_xticklabels(labels=list(dict_models['id']))
        if comp is None:
            ax.set(ylabel='kendall tau a correlation with target RDM')
        elif comp == 'spearman':
            ax.set(ylabel='spearman correlation with target RDM')
        elif comp == 'pearson':
            ax.set(ylabel='pearson correlation with target RDM')
        ax.add_patch(rect)
        plt.tight_layout()

    return rdm_avg, model_comp, rdms_dist
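# Hypothetical usage of rdm_compare(); the pickle paths are placeholders for
# files holding {'id': [...], 'rdm': [...]} dictionaries as expected above.
rdm_avg, model_comp, rdms_dist = rdm_compare('target_rdms.pkl', 'model_rdms.pkl',
                                             comp='spearman', plot='violin')
print(model_comp.head())  # per-participant correlation with each model RDM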
from nilearn.datasets import fetch_atlas_msdl, fetch_cobre
from nilearn.input_data import NiftiMapsMasker
from nilearn.connectome import vec_to_sym_matrix
from nilearn.plotting import plot_matrix
from posce import PopulationShrunkCovariance

# fetch atlas
msdl = fetch_atlas_msdl()

# fetch rfMRI scans from the COBRE dataset
cobre = fetch_cobre(n_subjects=20)

# extract timeseries
masker = NiftiMapsMasker(msdl.maps, detrend=True, standardize=True,
                         verbose=1, memory=".")
masker.fit()
ts = [masker.transform(f) for f in cobre.func]

# compute PoSCE on the same dataset
posce = PopulationShrunkCovariance(shrinkage=1e-2)
posce.fit(ts)
connectivities = posce.transform(ts)

# plot the first shrunk covariance
cov = vec_to_sym_matrix(connectivities[0])
plot_matrix(cov)
def _run_interface(self, runtime):
    # Loading data
    group_corr_mat = np.load(self.inputs.group_corr_mat)  # array with matrices for all runs
    group_conf_summary = pd.read_csv(self.inputs.group_conf_summary, sep='\t')  # motion summary for all runs
    pipeline_name = self.inputs.pipeline_name
    distance_vector = sym_matrix_to_vec(np.load(self.inputs.distance_matrix))  # load distance matrix

    # Plotting motion
    colour = ["#fe6863", "#00a074"]
    sns.set_palette(colour)
    fig = motion_plot(group_conf_summary)
    fig.savefig(join(self.inputs.output_dir,
                     f"motion_criterion_{pipeline_name}.svg"), dpi=300)

    # Creating vectors with subject filter
    all_sub_no = len(group_conf_summary)
    included_sub = group_conf_summary["include"]
    excluded_sub_no = all_sub_no - sum(included_sub)  # number of subjects excluded from analyses

    # Create dictionary describing the full sample and the sample after
    # excluding high motion runs
    included = {
        f"All subjects (n = {all_sub_no})":
            [np.ones((all_sub_no), dtype=bool), False, all_sub_no, "All"],
        f"After excluding {excluded_sub_no} high motion subjects (n = {all_sub_no - excluded_sub_no})":
            [group_conf_summary["include"].values.astype("bool"), True,
             all_sub_no - excluded_sub_no, "No_high_motion"]
    }

    group_corr_vec = sym_matrix_to_vec(group_corr_mat)
    n_edges = group_corr_vec.shape[1]
    fc_fd_corr, fc_fd_pval = (np.zeros(n_edges) for _ in range(2))

    fc_fd_summary = []
    edges_weight = {}
    edges_weight_clean = {}

    for key, value in included.items():
        # Edge-wise correlation between FC and mean framewise displacement
        for i in range(n_edges):
            corr = pearsonr(group_corr_vec[value[0], i],
                            group_conf_summary['mean_fd'].values[value[0]])
            fc_fd_corr[i] = corr[0]  # Pearson's r values
            fc_fd_pval[i] = corr[1]  # p-values

        fc_fd_corr = np.nan_to_num(fc_fd_corr)  # TODO: write exception

        # Calculate correlation between FC-FD r values and distance vector
        distance_dependence = pearsonr(fc_fd_corr, distance_vector)[0]

        # Store summary measures
        fc_fd_summary.append({
            "pipeline": pipeline_name,
            "perc_fc_fd_uncorr": np.sum(fc_fd_pval < 0.05) / len(fc_fd_pval) * 100,  # percent of edges significant at uncorrected p < .05
            "pearson_fc_fd": np.median(fc_fd_corr),
            "distance_dependence": distance_dependence,
            "tdof_loss": group_conf_summary["n_conf"].mean(),
            "cleaned": value[1],
            "subjects": value[3],
            "sub_no": value[2]
        })

        # For cleaned dataset
        if value[1]:
            edges_weight_clean = {pipeline_name: group_corr_vec[value[0]].mean(axis=0)}
        # For full dataset
        if not value[1]:
            edges_weight = {pipeline_name: group_corr_vec[value[0]].mean(axis=0)}

        # Plotting FC and FC-FD correlation matrices
        fc_fd_corr_mat = vec_to_sym_matrix(fc_fd_corr)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
        fig1 = ax1.imshow(group_corr_mat[value[0]].mean(axis=0),
                          vmin=-1, vmax=1, cmap="RdBu_r")
        ax1.set_title(f"{pipeline_name}: mean FC")
        fig.colorbar(fig1, ax=ax1)
        fig2 = ax2.imshow(fc_fd_corr_mat, vmin=-1, vmax=1, cmap="RdBu_r")
        ax2.set_title(f"{pipeline_name}: FC-FD correlation")
        fig.colorbar(fig2, ax=ax2)
        fig.suptitle(f"{pipeline_name}: {key}")
        fig.savefig(join(self.inputs.output_dir,
                         f"FC_FD_corr_mat_{pipeline_name}_{value[3].lower()}.png"),
                    dpi=300)

    exclude_list = [
        f"sub-{x + 1:02}" for x in
        group_conf_summary[group_conf_summary['include'] == 1]['subject']
    ]

    self._results["fc_fd_summary"] = fc_fd_summary
    self._results["edges_weight"] = edges_weight
    self._results["edges_weight_clean"] = edges_weight_clean
    self._results["exclude_list"] = exclude_list

    return runtime
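# A hypothetical wiring sketch: _run_interface above belongs to a nipype
# interface; "QualityMeasures" is a stand-in name for that interface class,
# and the input file names are placeholders.
from nipype import Node
quality = Node(QualityMeasures(), name="quality_measures")
quality.inputs.group_corr_mat = "group_corr_mat.npy"
quality.inputs.group_conf_summary = "group_conf_summary.tsv"
quality.inputs.distance_matrix = "distance_matrix.npy"
quality.inputs.pipeline_name = "pipeline-24HMP"
quality.inputs.output_dir = "."
quality.run()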
                         standardize=True, verbose=1, memory=".")
masker.fit()
ts = [masker.transform(f) for f in cobre.func]

#%%
# compute correlation
corr = ConnectivityMeasure(kind="correlation")
corr_connectivities = corr.fit_transform(ts)

# compute partial correlation
pcorr = ConnectivityMeasure(kind="partial correlation")
pcorr_connectivities = pcorr.fit_transform(ts)

# compute tangent embedding
tangent = ConnectivityMeasure(kind="tangent")
tangent_connectivities = tangent.fit_transform(ts)

#%%
# compute PoSCE
posce = PopulationShrunkCovariance(shrinkage=1e-2)
posce.fit(ts)
shrunk_connectivities = posce.transform(ts)
shrunk_connectivities = [vec_to_sym_matrix(c) for c in shrunk_connectivities]

#%%
# plot first subject
plot_matrix(corr_connectivities[0], title="Correlation")
plot_matrix(pcorr_connectivities[0], title="Partial Correlation")
plot_matrix(tangent_connectivities[0], title="Tangent Embedding")
plot_matrix(shrunk_connectivities[0], title="PoSCE")
stat, pvalues = stats.ttest_rel(zero_back, two_back)

import statsmodels.stats.multitest as ssm
_, pvals_corrected, _, _ = ssm.multipletests(pvalues, alpha=0.05, method="fdr_bh")

# binary vector marking edges that survive FDR correction
pvals = np.array([0 if p >= 0.05 else 1 for p in pvals_corrected])
print(sum(pvals))  # number of edges surviving correction

wei_vector = stat * pvals

diag = np.zeros(264)
matrix_wei = vec_to_sym_matrix(wei_vector, diagonal=diag)
matrix_bin = vec_to_sym_matrix(pvals, diagonal=diag)

plotting.plot_matrix(matrix_wei)

# Load coordinates from activation analysis.
activ = pd.read_csv("/home/finc/Dropbox/GitHub/nilearn_task_networks/support/coordinates_0back_2back.csv")
activ_coords = pd.DataFrame(activ, columns=["X", "Y", "Z"]).values

radius = 10

# Estimate neighbors
clf = neighbors.NearestNeighbors(radius=radius)

# Compute neighborhood matrix
A = clf.fit(activ_coords).radius_neighbors_graph().toarray()
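# A small illustrative follow-up (not in the original snippet): inspect the
# neighborhood structure captured by A. When radius_neighbors_graph is queried
# on the training data itself, each point is excluded from its own neighbors.
print(A.shape)        # (n_peaks, n_peaks) binary adjacency
print(A.sum(axis=1))  # number of other peaks within `radius` mm of each peak
plotting.plot_matrix(A)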
        [outcome_pain_sens.min(), outcome_pain_sens.max()],
        'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted by ' + kind)
plt.show()

from nilearn import plotting

#######################################################################
svr.fit(X_train_scaled, outcome_pain_sens)

import numpy as np
from nilearn.connectome import vec_to_sym_matrix

# back-project the SVR weights into a region-by-region matrix
m = vec_to_sym_matrix(svr.coef_, diagonal=np.repeat(0, len(mist_64_labels)))
mist_64_labels[kind] = abs(m).max(axis=1)
# mist_64_labels[kind] = m[55]
# m = (abs(m) > 0.9) * m

plotting.plot_connectome(m, mist_64_labels[['x', 'y', 'z']],
                         display_mode='lzry',
                         title="%s %s" % ("mean connectivity", kind),
                         colorbar=True)
plotting.show()
plotting.plot_matrix(m, figure=(15, 15), labels=mist_64_labels['label'],