def get_Tpermutation_distribution(dat_all, nperm, time_start, time_end, labels):
    """Build a null distribution of label-mean activation values by sign-flip
    permutation.

    For each of ``nperm`` permutations, randomly flips the sign of half the
    subjects' data, runs a one-sample t-test across subjects, converts the
    p-values to ``-log10(p)``, and records the maximum over ``labels`` of the
    mean thresholded value within each label.

    Parameters
    ----------
    dat_all : ndarray
        Subject data, shape (n_subjects, n_vertices, n_times); assumes a
        1 kHz sampling rate with a 100-sample pre-window offset — TODO confirm.
    nperm : int
        Number of permutations to run.
    time_start, time_end : float
        Analysis window in seconds (converted to ms indices below).
    labels : list of str
        Basenames of ``.label`` files in the fixed 250Labels directory.

    Returns
    -------
    list of float
        One value per permutation: the largest label-mean thresholded
        ``-log10(p)`` across all labels.
    """
    # Crop to the analysis window; the +100 accounts for a 100-sample
    # (100 ms at 1 kHz) offset at the start of dat_all — TODO confirm sfreq.
    dat = dat_all[:, :, int(time_start * 1000) + 100:int(time_end * 1000) + 100]

    # FIX: read each label file once, outside the permutation loop — the
    # files never change across permutations, so the original code performed
    # nperm * len(labels) redundant disk reads.
    label_dir = '/autofs/cluster/kuperberg/nonconMM/MEG/MNE/250Labels'
    label_objs = [
        mne.read_label(op.join(label_dir, label + '.label'),
                       subject='fsaverage', color='r')
        for label in labels
    ]

    # Sign vector: first half -1, second half +1; re-permuted every
    # iteration to randomly flip subjects' condition-difference signs.
    sign = np.ones((dat.shape[0],))
    sign[:np.int32(np.round(len(sign) / 2.))] = -1

    highest_permutation_values = []
    for index in range(nperm):  # to permute the conditions
        print("permutation:" + str(index))
        sign = np.random.permutation(sign)
        # Broadcast the per-subject signs over (vertices, times, subjects).
        dat_perm = sign * np.transpose(dat, [1, 2, 0])
        stats_perm, pval_perm = ttest_1samp(dat_perm, 0, axis=2)
        # Store -log10(p) as a source estimate on the fsaverage surface
        # (10242 vertices per hemisphere, i.e. ico-5 resolution).
        stc_perm = SourceEstimate(
            -np.log10(pval_perm),
            vertices=[np.arange(10242), np.arange(10242)],
            tmin=time_start, tstep=1 / 1000., subject='fsaverage')
        means_of_thresholded_labels = []
        for label_obj in label_objs:
            stc_label_perm = stc_perm.in_label(label_obj)
            stc_label_perm = threshold_by_pvalue(stc_label_perm, 2)
            means_of_thresholded_labels.append(stc_label_perm.data.mean())
        # Keep only the most extreme label mean for this permutation.
        highest_permutation_values.append(max(means_of_thresholded_labels))
    return highest_permutation_values
# ############################################################################ # construct appropriate brain activity print('Constructing original (simulated) sources') tmin, tmax = -0.2, 0.8 vertices = [s['vertno'] for s in src] n_vertices = sum(s['nuse'] for s in src) data = np.ones((n_vertices, int((tmax - tmin) * sfreq))) stc = SourceEstimate(data, vertices, -0.2, 1. / sfreq, subject) # limit activation to a square pulse in time at two vertices in space labels = [read_labels_from_annot(subject, 'aparc.a2009s', hemi, regexp='G_temp_sup-G_T_transv')[0] for hi, hemi in enumerate(('lh', 'rh'))] stc = stc.in_label(labels[0] + labels[1]) stc.data.fill(0) stc.data[:, np.where(np.logical_and(stc.times >= pulse_tmin, stc.times <= pulse_tmax))[0]] = 10e-9 # ############################################################################ # Simulate data # Simulate data with movement with warnings.catch_warnings(record=True): raw = Raw(fname_raw, allow_maxshield=True) raw_movement = simulate_movement(raw, fname_pos_orig, stc, trans, src, bem, interp='zero', n_jobs=6, verbose=True) # Simulate data with no movement (use initial head position) raw_stationary = simulate_movement(raw, None, stc, trans, src, bem,
print('Constructing original (simulated) sources')
tmin, tmax = -0.2, 0.8
# One vertex array per hemisphere, taken from the forward source space.
vertices = [s['vertno'] for s in src]
n_vertices = sum(s['nuse'] for s in src)
data = np.zeros((n_vertices, int((tmax - tmin) * sfreq)))
stc = SourceEstimate(data, vertices, -0.2, 1. / sfreq, subject)

# limit activation to a square pulse in time at two vertices in space
# (transverse temporal gyrus, one label per hemisphere)
labels = [
    read_labels_from_annot(subject, 'aparc.a2009s', hemi,
                           regexp='G_temp_sup-G_T_transv')[0]
    for hi, hemi in enumerate(('lh', 'rh'))
]
stc = stc.in_label(labels[0] + labels[1])
stc.data.fill(0)
# Square pulse of amplitude 10e-9 (= 1e-8, presumably Am) within
# [pulse_tmin, pulse_tmax] — TODO confirm intended amplitude/units.
stc.data[:, (stc.times >= pulse_tmin) & (stc.times <= pulse_tmax)] = 10e-9

# ############################################################################
# Simulate data

# Simulate data with movement
# NOTE(review): block structure reconstructed from a whitespace-collapsed
# source; it is assumed only the Raw read is inside the warnings context
# (to suppress the allow_maxshield warning) — confirm against the original.
with warnings.catch_warnings(record=True):
    raw = Raw(fname_raw, allow_maxshield=True)
raw_movement = simulate_raw(raw, stc, trans, src, bem, chpi=True,