Example #1
        plot=False)

###############################################################################
# 4) Create summary plots to show the signal correction on the main
# experimental condition

# create a-cue epochs
a_evs = events_from_annotations(raw, regexp='^(70)')[0]
a_epo = Epochs(raw,
               a_evs,
               tmin=-2.0,
               tmax=2.0,
               reject_by_annotation=True,
               proj=False,
               preload=True)
a_epo.apply_baseline(baseline=(-0.3, -0.05))
a_evo = a_epo.average()

# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
    bad_components.extend(ica.labels_[label])

for bad_comp in np.unique(bad_components):
    # show component frequency spectrum
    fig_comp = ica.plot_properties(a_epo,
                                   picks=bad_comp,
                                   psd_args={'fmax': 35.},
                                   show=False)[0]

    # show how the signal is affected by component rejection
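The example is truncated at this point. Below is a minimal sketch of the step the last comment describes, assuming MNE's standard ICA.plot_overlay API; the actual call in the original source is not shown:

    # hypothetical completion: overlay the evoked signal before and after
    # removing the flagged component (ica.plot_overlay is standard MNE API)
    fig_overlay = ica.plot_overlay(a_evo, exclude=[bad_comp], show=False)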
Example #2
	def __init__(self, sj: int, epochs: mne.Epochs, beh: pd.DataFrame, to_decode: str, nr_folds: int, 
				classifier: str = 'LDA', method: str = 'auc', elec_oi: Union[str, list] = 'all', downsample: int = 128, 
				avg_runs: int = 1, avg_trials: int = 1, sliding_window: tuple = (1, True, False), scale: dict = {'standardize': False, 'scale': False},
				pca_components: tuple = (0, 'across'), bdm_filter: Optional[dict] = None, 
				baseline: Optional[tuple] = None, seed: Union[int, bool] = 42213):
		"""set decoding parameters that will be used in BDM class

		Args:
			sj (int): Subject number
			epochs (mne.Epochs): epoched eeg data (linked to beh)
			beh (pd.DataFrame): Dataframe with behavioral parameters per epoch (see epochs)
			to_decode (str): column in beh that contains classes used for decoding
			nr_folds (int): specifies how many folds will be used for k-fold cross-validation
			classifier (str, optional): Sets which classifier is used for decoding. Supports 'LDA' (linear discriminant analysis),
			'svm' (support vector machine) and 'GNB' (Gaussian Naive Bayes). Defaults to 'LDA'.
			method (str, optional): [description]. Defaults to 'auc'.
			elec_oi (Union[str, list], optional): [description]. Defaults to 'all'.
			downsample (int, optional): [description]. Defaults to 128.
			avg_runs (int, optional): Determines how often the (random) cross-validation procedure is performed.
			Decoding output reflects the average of all cross-validation runs. Defaults to 1.
			avg_trials (int, optional): If larger than 1, specifies the number of trials that are averaged together before cross-validation.
			Averaging is done across each unique combination of condition and decoding label. Defaults to 1 (i.e., no trial averaging).
			sliding_window (tuple, optional): Increases the number of features used for decoding by a factor of the size of the sliding window
			by giving the classifier access to all time points in the window (see Grootswagers et al. 2017, JoCN). The second argument specifies
			whether (True) or not (False) the activity in each sliding window is demeaned (see Hajonides et al. 2021, NeuroImage). If the third
			argument is set to True, rather than increasing the number of features, each time point reflects the average within the sliding window.
			Defaults to (1, True, False), meaning that no data transformation is applied.
			scale (dict, optional): Dictionary with two keys. 'standardize' specifies whether data should be standardized (True) or not (False);
			'scale' specifies whether data should also be scaled to unit variance (or, equivalently, unit standard deviation). This step is
			always performed before PCA. Defaults to {'standardize': False, 'scale': False} (i.e., no standardization).
			pca_components (tuple, optional): Apply dimensionality reduction before decoding. The first argument specifies the number of principal
			components N that the features are reduced to; if N < 1 it indicates the % of explained variance (and the number of components is inferred).
			The second argument specifies whether the transformation is estimated on both training and test data ('all') or estimated on training
			data only and applied to the test data in each cross-validation step.
			Defaults to (0, 'across') (i.e., no PCA reduction).
			bdm_filter (Optional[dict], optional): [description]. Defaults to None.
			baseline (Optional[tuple], optional): [description]. Defaults to None.
			seed (Union[int, bool], optional): Sets a random seed so that the cross-validation procedure can be repeated.
			If False, no seed is applied before cross-validation. If avg_runs > 1, the seed is increased by 1 for each run.
			Defaults to 42213 (A1Z26 cipher of DvM).
		"""	
		self.sj = sj					
		self.beh = beh
		if bdm_filter is not None:
			self.bdm_type, self.bdm_band = list(bdm_filter.items())[0]
			self.epochs = epochs  # baseline correction is done at a later stage
		else:
			self.bdm_type = 'broad'
			self.epochs = epochs.apply_baseline(baseline=baseline)
		self.classifier = classifier
		self.baseline = baseline
		self.to_decode = to_decode
		self.nr_folds = nr_folds
		self.elec_oi = elec_oi
		self.downsample = downsample
		self.window_size = sliding_window
		self.scale = scale
		self.pca_components = pca_components
		self.bdm_filter = bdm_filter
		self.method = method
		self.avg_runs = avg_runs
		self.avg_trials = avg_trials
		self.seed = seed
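The docstring above describes a fairly rich configuration surface. A minimal usage sketch, assuming the class is named BDM (as the docstring states) and that `epochs` and `beh` are already loaded; all argument values here are illustrative, not taken from the original source:

# hypothetical usage: decode the 'condition' column with 10-fold
# cross-validation on all electrodes, averaging over two runs
bdm = BDM(sj=1, epochs=epochs, beh=beh, to_decode='condition',
          nr_folds=10, classifier='LDA', method='auc',
          avg_runs=2, baseline=(-0.2, 0.0))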
Example #3
                epochs_baseline = Epochs(raw,
                                         events,
                                         event_id=event_id_bsl,
                                         tmin=-.200,
                                         tmax=0.,
                                         preload=True,
                                         baseline=None,
                                         decim=10)
                # Apply baseline of Target
                bsl_channels = pick_types(epochs.info, meg=True)
                bsl_data = epochs_baseline.get_data()[:, bsl_channels, :]
                bsl_data = np.mean(bsl_data, axis=2)
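                # bsl_data is now (n_epochs, n_channels): the per-epoch,
                # per-channel mean over the baseline window; np.newaxis below
                # broadcasts it across the time axis of the target epochs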
                epochs._data[:, bsl_channels, :] -= bsl_data[:, :, np.newaxis]
            else:
                # Apply baseline from beginning of epoch to t0
                epochs.apply_baseline((-0.2, 0.))
            epochs_list.append(epochs)
        epochs = concatenate_epochs(epochs_list)
        # Save epochs and hdf5 behavior
        suffix = '' if target_baseline else '_bsl'
        session = '_2' if subject[-1:] == '2' else '_1'
        fname = op.join(path_data, subject,
                        'behavior_%s%s.hdf5' % (event_type, session))
        write_hdf5(fname, events_behavior_type, overwrite=True)
        fname = op.join(path_data, subject,
                        'epochs_%s%s%s.fif' % (event_type, suffix, session))
        epochs.save(fname)

# concatenate the two sessions when a 2nd one exists
subject = sys.argv[1]
suffix = '' if target_baseline else '_bsl'
def make_cue_epoch_tf(subject):
    """Create cue epochs during WM task """
    fname_raw = op.join(path_data, subject)
    # Read behavioral file
    fname_bhv = list()
    files = os.listdir(op.join(path_data, subject, 'behavdata'))
    fname_bhv.extend(([
        op.join(fname_raw + '/behavdata/') + f for f in files if 'WorkMem' in f
    ]))
    for fname_behavior in fname_bhv:
        events_behavior = get_events_from_mat(fname_behavior)
    # Read raw MEG data and extract event triggers
    runs = list()
    files = os.listdir(fname_raw)
    runs.extend(([op.join(fname_raw + '/') + f for f in files if '.ds' in f]))
    events_meg = list()
    for run_number, this_run in enumerate(runs):
        fname_raw = op.join(path_data, subject, this_run)
        print(fname_raw)
        raw = read_raw_ctf(fname_raw, preload=True, system_clock='ignore')
        channel_trigger = np.where(np.array(raw.ch_names) == 'USPT001')[0][0]
        # replace 255 values with 0
        trigger_baseline = np.where(raw._data[channel_trigger, :] == 255)[0]
        raw._data[channel_trigger, trigger_baseline] = 0.
        # find triggers
        events_meg_ = mne.find_events(raw)
        # Add 48 ms to the trigger events (according to the delay measured with the photodiode)
        events_meg_ = np.array(events_meg_, float)
        events_meg_[:, 0] += round(.048 * raw.info['sfreq'])
        events_meg_ = np.array(events_meg_, int)
        # store in column 1 the run from which the event was found
        events_meg_[:, 1] = run_number
        events_meg.append(events_meg_)
    # concatenate all meg events
    events_meg = np.vstack(events_meg)
    # add trigger index to meg_events
    events_target = range(1, 126)
    events_cue = range(126, 130)
    events_probe = range(130, 141)
    triggidx_array = []
    for trigg in events_meg[:, 2]:
        if trigg in events_target:
            triggidx = 1
        elif trigg in events_cue:
            triggidx = 2
        elif trigg in events_probe:
            triggidx = 3
        else:
            # fail loudly on unexpected trigger values; previously triggidx
            # silently kept the value assigned for the preceding event
            raise ValueError('unexpected trigger value: %s' % trigg)
        triggidx_array.append(triggidx)
    events_meg = np.insert(events_meg, 3, triggidx_array, axis=1)
    # Compare MEG and bhv triggers and save events_behavior for each event
    event_type = 'Cue'
    events_behavior_type = []
    events_behavior_type = fix_triggers(events_meg,
                                        events_behavior,
                                        event_type='trigg' + event_type)
    epochs_list = list()
    # Read raw MEG, filter and epochs
    for run_number, this_run in enumerate(runs):
        fname_raw = op.join(path_data, subject, this_run)
        print(fname_raw)
        raw = read_raw_ctf(fname_raw, preload=True, system_clock='ignore')
        events_meg_run = make_events_run(events_behavior_type, run_number)
        event_id = {
            'ttl_%i' % ii: ii
            for ii in np.unique(events_meg_run[:, 2])
        }
        tmin = -.200
        tmax = 1.500
        epochs = Epochs(raw,
                        events_meg_run,
                        event_id=event_id,
                        tmin=tmin,
                        tmax=tmax,
                        preload=True,
                        baseline=None,
                        decim=10)
        # Copy dev_head_t of the first run to others run
        if run_number == 0:
            dev_head_t = epochs.info['dev_head_t']
        else:
            epochs.info['dev_head_t'] = dev_head_t
        # Apply baseline from beginning of epoch to t0
        epochs.apply_baseline((-0.2, 0.))
        epochs_list.append(epochs)
    epochs = concatenate_epochs(epochs_list)
    epochs.pick_types(meg=True, ref_meg=False)
    events = events_behavior_type
    events = complete_behavior(events)
    return epochs, events
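A minimal sketch of how the function above would be driven, mirroring the script-level pattern earlier in this file (the call itself does not appear in the excerpt):

# hypothetical driver: build cue epochs and behavior for one subject
epochs, events = make_cue_epoch_tf(subject)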
Example #5
        plot=False)

###############################################################################
# 4) Create summary plots to show the signal correction on the main
# experimental condition

# create target epochs
target_evs = events_from_annotations(raw, regexp='(11)|(12)|(21)|(22)')[0]
target_epo = Epochs(raw,
                    target_evs,
                    tmin=-1.5,
                    tmax=1.5,
                    reject_by_annotation=True,
                    proj=False,
                    preload=True)
target_epo.apply_baseline(baseline=(-0.3, -0.05))
target_evo = target_epo.average()

# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
    bad_components.extend(ica.labels_[label])

for bad_comp in np.unique(bad_components):
    # show component frequency spectrum
    fig_comp = ica.plot_properties(target_epo,
                                   picks=bad_comp,
                                   psd_args={'fmax': 35.},
                                   show=False)[0]

    # show how the signal is affected by component rejection
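As in Example #1, the excerpt is truncated at this point; the same hedged sketch applies, with target_evo in place of a_evo:

    # hypothetical completion, mirroring Example #1
    fig_overlay = ica.plot_overlay(target_evo, exclude=[bad_comp], show=False)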