    aic_comps = pickle.load(f)

triu_inds = np.triu_indices(mat_n, k=1)
cnx_masks = np.zeros((mat_n, mat_n))
mod_idx = 2
# mark (symmetrically) the edges whose single winning model was mod_idx
for n_idx in range(node_n):
    if aic_comps["single_winner_ids"][n_idx] == mod_idx:
        cnx_masks[triu_inds[0][n_idx], triu_inds[1][n_idx]] = 1
        cnx_masks[triu_inds[1][n_idx], triu_inds[0][n_idx]] = 1

group_id = []
columns = ("Brain", "Subj", "Block", "ROI", "OutRegion", "Hemi", "RT")
data_dict = {col: [] for col in columns}
for sub_idx, sub in enumerate(subjs):
    # make the df and data object for this particular subject
    for cond_idx, cond in enumerate(conds):
        dPTE = load_sparse("{}nc_{}_{}_byresp_dPTE_{}.sps".format(
            proc_dir, sub, cond, band))
        epo = mne.read_epochs("{}{}_{}_byresp-epo.fif".format(
            proc_dir, sub, cond))
        for epo_idx in range(len(dPTE)):
            this_epo = dPTE[epo_idx, ].copy()
            # dPTE is stored as upper triangle only; rebuild the lower
            # triangle from the antisymmetry around 0.5
            this_epo[triu_inds[1], triu_inds[0]] = \
                1 - this_epo[triu_inds[0], triu_inds[1]]
            for ROI in ROIs:
                ROI_idx = label_names.index(ROI)
                cnx_col_inds = list(np.where(cnx_masks[ROI_idx, ])[0])
                for col_idx in cnx_col_inds:
                    this_point = this_epo[ROI_idx, col_idx].copy()
                    outname = label_names[col_idx]
                    outhemi = "lh" if "lh" in outname else "rh"
                    data_dict["Brain"].append(this_point)
                    data_dict["Subj"].append(sub)
                    data_dict["Block"].append(cond)
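# ---------------------------------------------------------------------------
# Hedged example (not in the original script): the comment above mentions
# building a df from these rows. A minimal sketch of that step, assuming
# pandas and using a toy stand-in for data_dict with clearly fake values:
import pandas as pd

toy_dict = {"Brain": [0.52, 0.47], "Subj": ["ATT_10", "ATT_10"],
            "Block": ["audio", "rest"], "ROI": ["A-lh", "A-lh"],
            "OutRegion": ["B-rh", "B-rh"], "Hemi": ["rh", "rh"],
            "RT": [0.61, 0.58]}
toy_df = pd.DataFrame.from_dict(toy_dict)
print(toy_df.head())  # one row per ROI/out-region/epoch observation
# ---------------------------------------------------------------------------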
alpha = 1
precomputed = False
epo_avg = False
subjs = ["ATT_10", "ATT_11", "ATT_12", "ATT_13", "ATT_14", "ATT_15",
         "ATT_16", "ATT_17", "ATT_18", "ATT_19", "ATT_20", "ATT_21",
         "ATT_22", "ATT_23", "ATT_24", "ATT_25", "ATT_26", "ATT_28",
         "ATT_29", "ATT_31", "ATT_33", "ATT_34", "ATT_35", "ATT_36",
         "ATT_37"]
conds = ["rest", "audio", "visual", "visselten"]
freq = "alpha_1"

if precomputed:
    affinity = "precomputed"
    distmat = load_sparse("{}{}_dist.sps".format(proc_dir, filename))
    distmat += distmat.T
    # turn distances into similarities for the precomputed affinity
    distmat = 1 - distmat
    inmat = distmat
    with open("{}{}_dist.labels".format(proc_dir, filename), "rb") as f:
        labels = pickle.load(f)
else:
    affinity = "nearest_neighbors"
    dPTEs = []
    sub_inds = []
    cond_inds = []
    for sub in subjs:
        for cond in conds:
            dPTE = load_sparse("{}nc_{}_{}_dPTE_{}.sps".format(
                proc_dir, sub, cond, freq))
            if epo_avg:
                # average across epochs, keeping a singleton first axis
                dPTE = dPTE.mean(axis=0, keepdims=True)
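# ---------------------------------------------------------------------------
# Hedged example (not in the original script): the `affinity` strings above
# ("precomputed", "nearest_neighbors") match scikit-learn's spectral
# clustering options, so presumably they feed a step like the following;
# shown here on a toy symmetric similarity matrix.
import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.default_rng(42)
toy_sim = rng.random((20, 20))
toy_sim = (toy_sim + toy_sim.T) / 2   # a precomputed affinity must be symmetric
np.fill_diagonal(toy_sim, 1.0)
toy_clust = SpectralClustering(n_clusters=3, affinity="precomputed",
                               random_state=42)
print(toy_clust.fit_predict(toy_sim))  # one cluster label per sample
# ---------------------------------------------------------------------------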
"view": "caudal", "distance": 850 } } region_names = [lab.name for lab in labels] regions = [] for rn in region_names: for l in labels: if l.name == rn: regions.append(l) data = [] for sub_idx, sub in enumerate(subjs): # we actually only need the dPTE to get the number of trials data_temp = load_sparse("{}nc_{}_{}_dPTE_{}.sps".format( proc_dir, sub, cond, band)) for epo_idx in range(data_temp.shape[0]): data.append(data_temp[epo_idx, ]) data = np.array(data) data = data.mean(axis=0) fontsize = 110 top = 100 background = (1, 1, 1) brain_a = plot_directed_cnx(data, regions, parc, top_cnx=top, centre=0.5, background=background) vmin, vmax = get_vminmax(data, top)
"ATT_17", "ATT_18", "ATT_19", "ATT_20", "ATT_21", "ATT_22", "ATT_23", "ATT_24", "ATT_25", "ATT_26", "ATT_28", "ATT_29", "ATT_31", "ATT_33", "ATT_34", "ATT_35", "ATT_36", "ATT_37" ] freq = "gamma_2" conds = ["rest", "audio", "visual", "visselten"] n_jobs = 8 epo_avg = True dPTEs = [] sub_inds = [] cond_inds = [] for sub in subjs: for cond in conds: dPTE = load_sparse("{}nc_{}_{}_dPTE_{}.sps".format( proc_dir, sub, cond, freq)) if epo_avg: dPTE = dPTE.mean(axis=0, keepdims=True) dPTEs.append(dPTE) sub_inds += [sub for idx in range(len(dPTE))] cond_inds += [cond for idx in range(len(dPTE))] labels = {"sub": sub_inds, "cond": cond_inds} dPTE = np.vstack(dPTEs) mat_n = len(dPTE) print("Creating and saving pairwise distance matrices...") # make the indices of the chunks used for parallel comb = np.array(np.triu_indices(mat_n, k=1), dtype="int32").T a = np.arange(0, len(comb), len(comb) // n_jobs).astype("int32") b = a + len(comb) // n_jobs b[-1] += len(comb) % n_jobs inds = tuple(zip(a, b))
labels = mne.read_labels_from_annot("fsaverage", parc)
subjs = ["ATT_10", "ATT_11", "ATT_12", "ATT_13", "ATT_14", "ATT_15",
         "ATT_16", "ATT_17", "ATT_18", "ATT_19", "ATT_20", "ATT_21",
         "ATT_22", "ATT_23", "ATT_24", "ATT_25", "ATT_26", "ATT_28",
         "ATT_31", "ATT_33", "ATT_34", "ATT_35", "ATT_36", "ATT_37"]
top_cnx = 150

# average across trials within each subject; keep resting state separate
# because it serves as the baseline
dPTEs = [[] for k in eff_dict.keys()]
rests = []
for sub in subjs:
    for idx, k in enumerate(eff_dict.keys()):
        dPTE = load_sparse("{}nc_{}_{}_dPTE_{}.sps".format(
            proc_dir, sub, k, band))
        dPTEs[idx].append(dPTE.mean(axis=0))
    dPTE = load_sparse("{}nc_{}_rest_dPTE_{}.sps".format(proc_dir, sub, band))
    rests.append(dPTE.mean(axis=0))
dPTEs = np.mean(dPTEs, axis=1)  # average across subjects per condition
rest = np.mean(rests, axis=0)

# condition-minus-rest contrasts, ordered as in eff_dict; these are then
# masked by the statistically significant edges
contrasts = {
    "audio": dPTEs[0] - rest,
    "visselten": dPTEs[1] - rest,
    "visual": dPTEs[2] - rest,
    "zaehlen": dPTEs[3] - rest,
}
for k, v in contrasts.items():
conds = ["audio", "visual", "visselten"]
conds = ["audio", "rest"]  # override: only this pair is compared here

# the significant-edge file may have been saved under either condition order
try:
    edges = np.load("{}{}-{}_{}_c{}.npy".format(
        proc_dir, conds[0], conds[1], band, comp))
except FileNotFoundError:
    edges = np.load("{}{}-{}_{}_c{}.npy".format(
        proc_dir, conds[1], conds[0], band, comp))

dPTEs = []
for cond in conds:
    dPTE = []
    for sub in subjs:
        fname = "{}nc_{}_{}_dPTE_{}.sps".format(mat_dir, sub, cond, band)
        temp_dPTE = load_sparse(fname)
        temp_dPTE[np.abs(temp_dPTE) == np.inf] = np.nan
        temp_dPTE = np.nanmean(temp_dPTE, axis=0, keepdims=True)
        dPTE.append(temp_dPTE)
    # grand average across subjects, then centre non-zero values around 0
    # (0.5 is the no-preferred-direction point for dPTE) and normalise
    temp_pte = np.array(dPTE).mean(axis=0)[0, ]
    temp_pte[np.where(temp_pte)] = temp_pte[np.where(temp_pte)] - 0.5
    temp_pte /= np.abs(temp_pte).max()
    thresh_pte = np.zeros(temp_pte.shape)
    # threshold by statistically significant edges
    for edge_idx in range(len(edges)):
        thresh_pte[edges[edge_idx, 0], edges[edge_idx, 1]] = \
            temp_pte[edges[edge_idx, 0], edges[edge_idx, 1]]
    # threshold by top x connections
    top_thresh = np.sort(np.abs(thresh_pte.flatten()))[-top_cnx]
    thresh_pte[np.abs(thresh_pte) < top_thresh] = 0
    dPTEs.append(thresh_pte)
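# ---------------------------------------------------------------------------
# Hedged note (not in the original script): the full np.sort above is
# O(n log n); np.partition finds the same top-N cutoff in O(n), which can
# matter for large connectivity matrices. Demonstrated on toy data:
import numpy as np

toy = np.random.rand(70, 70) - 0.5
toy_top = 150
cut_sort = np.sort(np.abs(toy.flatten()))[-toy_top]
cut_part = np.partition(np.abs(toy.flatten()), -toy_top)[-toy_top]
assert cut_sort == cut_part  # identical threshold, cheaper to compute
# ---------------------------------------------------------------------------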
"ATT_35", "ATT_36", "ATT_37" ] runs = ["audio", "visselten", "visual", "zaehlen"] runs = ["zaehlen"] wavs = ["4000fftf", "4000Hz", "7000Hz", "4000cheby"] cyc_names = [ "theta_0", "alpha_0", "alpha_1", "beta_0", "beta_1", "gamma_0", "gamma_1" ] cyc_names = ["alpha_0", "alpha_1"] for sub in subjs: for run in runs: epos = [] for wav_idx, wav_name in enumerate(wavs): epo_name = "{dir}nc_{sub}_{run}_{wav}_hand-epo.fif".format( dir=proc_dir, sub=sub, run=run, wav=wav_name) temp_epo = mne.read_epochs(epo_name) epos.append(temp_epo) for cn in cyc_names: dPTE = load_sparse("{dir}nc_{sub}_{run}_dPTE_{cyc}.sps".format( dir=proc_dir, sub=sub, run=run, cyc=cn)) current_idx = 0 for wav, epo in zip(wavs, epos): this_dPTE = dPTE[current_idx:current_idx + len(epo), ] current_idx += len(epo) this_dPTE = TriuSparse(this_dPTE) this_dPTE.save( "{dir}nc_{sub}_{run}_{wav}_dPTE_{cyc}.sps".format( dir=proc_dir, sub=sub, run=run, wav=wav, cyc=cn))