# --- Feedback-locked epochs: load, re-reference, baseline, and trial rejection ---
# NOTE(review): this chunk was rendered with lost newlines; formatting and comments
# restored here — all code tokens are unchanged.
sub = dict(loc='workstation', id=i)
param = get_subject_info_wmConfidence(sub)

# get the epoched data (this is loaded in with the metadata)
epochs = mne.read_epochs(fname=param['fblocked'], preload=True)
epochs.set_eeg_reference(['RM'])
epochs.apply_baseline((-.25, 0))  # baseline 250ms prior to feedback
epochs.resample(500)  # resample to 500Hz
ntrials = len(epochs)

# automated process of looking for trials with heightened variance (noise);
# outputs the indices of trials to keep
_, keeps = plot_AR(epochs, method='gesd', zthreshold=1.5, p_out=.1, alpha=.05, outlier_side=1)
plt.close()
keeps = keeps.flatten()

# invert "keep" indices into a boolean discard mask over all trials
discards = np.ones(len(epochs), dtype='bool')
discards[keeps] = False
epochs = epochs.drop(discards)  # first we'll drop trials with excessive noise in the EEG

# then drop trials with behavioural problems (bad decision time, no click response)
epochs = epochs['DTcheck == 0 and clickresp == 1']
print('a total of %d trials have been dropped for this subjects' % (ntrials - len(epochs)))

# NOTE(review): statement truncated at the chunk boundary — the remaining
# TrialGLMData arguments continue beyond this view
glmdata = glm.data.TrialGLMData(data=epochs.get_data(),
# --- Cue-locked epochs: previous-trial regressors, trial rejection, re-reference ---
# Drop EOG channels before artifact rejection: if you don't do this and run the gesd,
# it can fail because EOG variance makes trialwise variance estimation terrible.
# We don't need the EOGs at this point anyway (blinks were ICA'd out), so they are
# just extra data taking up space.
cuelocked = cuelocked.pick_types(eeg=True, misc=True)

# add previous-trial behaviour to the metadata (shift(1) aligns trial t-1 onto trial t;
# the first trial gets NaN as it has no predecessor)
bdata = cuelocked.metadata
# BUGFIX: the shifted error array was computed twice (once into an unused local,
# once again for the metadata column) — compute it once and reuse it.
prevtrlerr = bdata.shift(1).absrdif.to_numpy()
bdata['prevtrlerr'] = prevtrlerr
bdata['prevtrlconf'] = bdata.shift(1).confwidth.to_numpy()
cuelocked.metadata = bdata

# automated process of looking for trials with heightened variance (noise);
# outputs the indices of trials to keep
_, keeps = plot_AR(cuelocked, method='gesd', zthreshold=1.5, p_out=.1, alpha=.05, outlier_side=1)
keeps = keeps.flatten()

# invert "keep" indices into a boolean discard mask
discards = np.ones(len(cuelocked), dtype='bool')
discards[keeps] = False
cuelocked = cuelocked.drop(discards)  # first we'll drop trials with excessive noise in the EEG

# now we'll drop trials with behaviour problems
# (reaction time +/- 2.5 SDs of mean, didn't click to report orientation)
cuelocked = cuelocked['DTcheck == 0 and clickresp == 1']
cuelocked.set_eeg_reference(ref_channels=['RM'])  # re-reference to average of the two mastoids
# --- Array-locked epochs (session 2): behavioural regressors and trial rejection ---
# NOTE(review): this chunk was rendered with lost newlines; formatting and comments
# restored here — all code tokens are unchanged. It also starts mid-script:
# bdata1 / arraylocked1 / arraylocked2 are defined before this view.
bdata2 = pd.read_csv(param['behaviour_blinkchecked2'], index_col=None)
# write down on each trial what the previous trial's error awareness was
bdata2['prevtrlconfdiff'] = bdata2.confdiff.shift(1)
# write down on each trial what the next trial's error awareness is (used in glm)
bdata2['nexttrlconfdiff'] = bdata2.confdiff.shift(-1)

# attach the aligned metadata to each session's epochs
arraylocked1.metadata = bdata1
arraylocked2.metadata = bdata2
# arraylocked = mne.concatenate_epochs([arraylocked1, arraylocked2]) #combine the epoched data with aligned metadata

# automated search for trials with heightened variance (noise) in session 1;
# deepcopy so pick_types(eeg=True) does not strip channels from arraylocked1 itself
_, keeps = plot_AR(deepcopy(arraylocked1).pick_types(eeg=True), method='gesd', zthreshold=1.5, p_out=.1, alpha=.05, outlier_side=1)
keeps = keeps.flatten()

discards = np.ones(len(arraylocked1), dtype='bool')
discards[keeps] = False
arraylocked1 = arraylocked1.drop(discards)  # first we'll drop trials with excessive noise in the EEG

# now we'll drop trials with behaviour problems
# (reaction time +/- 2.5 SDs of mean, didn't click to report orientation)
arraylocked1 = arraylocked1['DTcheck == 0 and clickresp == 1']

# NOTE(review): call truncated at the chunk boundary — remaining plot_AR
# arguments for session 2 continue beyond this view
_, keeps = plot_AR(deepcopy(arraylocked2).pick_types(eeg=True), method='gesd', zthreshold=1.5,
# --- Probe-locked epochs: load, downsample, trial rejection, re-reference ---
print('\n\nworking on subject ' + str(i) + '\n\n')
sub = dict(loc='workstation', id=i)
param = get_subject_info_wmConfidence(sub)

probelocked = mne.epochs.read_epochs(fname=param['probelocked'], preload=True)  # read epoched data
# downsample to 100Hz so we don't overwork the workstation
# (100Hz should be fine for frequencies up to 50Hz in brain)
probelocked.resample(100)

# automated process of looking for trials with heightened variance (noise);
# outputs the indices of trials to keep.
# BUGFIX: plot_AR fails on subject 18, but the keeps/discards/drop steps previously
# ran unconditionally — for subject 18 `keeps` would be undefined (NameError) or,
# inside a subject loop, left over from the previous subject. The whole AR-based
# rejection is therefore scoped under the guard, so subject 18 simply skips it.
if i != 18:
    _, keeps = plot_AR(probelocked, method='gesd', zthreshold=1.5, p_out=.1, alpha=.05, outlier_side=1)
    keeps = keeps.flatten()

    discards = np.ones(len(probelocked), dtype='bool')
    discards[keeps] = False
    probelocked = probelocked.drop(discards)  # first we'll drop trials with excessive noise in the EEG

# now we'll drop trials with behaviour problems
# (reaction time +/- 2.5 SDs of mean, didn't click to report orientation);
# also exclude trials where blinks happened in the array or cue period
probelocked = probelocked['DTcheck == 0 and clickresp == 1 and arraycueblink == 0']
probelocked.set_eeg_reference(ref_channels=['RM'])  # re-reference to average of the two mastoids
# --- Combine the two cue-locked sessions and reject bad trials ---
# Bad channels must be interpolated first: epochs cannot be concatenated unless
# both sessions carry identical channel sets with no marked bads.
for session_epochs in (cuelocked1, cuelocked2):
    if session_epochs.info['bads']:
        session_epochs.interpolate_bads(reset_bads=True)  # in-place repair

# combine the epoched data with aligned metadata
cuelocked = mne.concatenate_epochs([cuelocked1, cuelocked2])

# Trial rejection, step 1: automated gesd-based removal of high-variance trials.
# Step 2 (manual inspection) happens afterwards, catching anything that slipped
# through — catastrophic failures or noise in the baseline.
# NOTE: pick_types here operates in place, so non-EEG channels are dropped from
# `cuelocked` itself before the variance screen.
_, keeps = plot_AR(cuelocked.pick_types(eeg=True), method='gesd', zthreshold=1.5, p_out=.1, alpha=.05, outlier_side=1)
keeps = keeps.flatten()
plt.close()

# boolean mask of trials NOT in the keep list
discards = ~np.isin(np.arange(len(cuelocked)), keeps)
cuelocked = cuelocked.drop(discards)  # remove trials with excessive EEG noise

# then drop trials with behavioural problems
# (the last trial of the session doesn't have a following trial!)
cuelocked = cuelocked['DTcheck == 0 and clickresp == 1']

# manual pass:
# cuelocked.plot(n_channels=62, scalings = dict(eeg=200e-6), n_epochs=3)