Example #1
def discover_channel_psd_difference():
    highscore_epoch, lowscore_epoch = None, None
    highscorenum, lowscorenum = 0, 0
    for i in range(1, 60):
        try:
            data_score = get_score(i)[0]
            data_state = get_state(i, 3)
            data_eeg = get_epoch_eeg(i).drop(["condition"], axis=1)
            data_raw_eeg = get_raw_eeg(i)
            if not (len(data_score) == len(data_eeg["epoch"].value_counts())
                    and len(data_score) == len(
                        data_state[data_state["markerText"] == "ShotOps"])):
                continue
            print("yes")
            events, event_id = mne.events_from_annotations(data_raw_eeg)
            epochs = mne.Epochs(data_raw_eeg,
                                events,
                                event_id,
                                tmin=-3,
                                tmax=0,
                                event_repeated='drop',
                                preload=True)
            first, second = get_score(i + 1)
            rest_eeg = epochs["s1001"][0]
            total_shot_num = len(first) + len(second)
            first_shoot_eeg = epochs["s1002"][-total_shot_num:-len(second)]
            for j in range(len(data_score)):
                if data_score[j] > 9:  # high-scoring shots
                    if highscore_epoch is None:
                        highscore_epoch = first_shoot_eeg[j]
                    else:
                        highscore_epoch = mne.concatenate_epochs(
                            [highscore_epoch, first_shoot_eeg[j]])
                    highscorenum += 1

                if data_score[j] < 8:  # low-scoring shots
                    if lowscore_epoch is None:
                        lowscore_epoch = first_shoot_eeg[j]
                    else:
                        lowscore_epoch = mne.concatenate_epochs(
                            [lowscore_epoch, first_shoot_eeg[j]])
                    lowscorenum += 1
            print(highscorenum)
            print(lowscorenum)
        except Exception:
            traceback.print_exc()
    channelnames = ["Fz", "F3", "F4", "P3", "Pz", "P4", "O1", "O2", "POz"]
    print("test")
    for channelname in channelnames:
        plot_channel_psd(highscore_epoch, channelname)

    for channelname in channelnames:
        plot_channel_psd(lowscore_epoch, channelname)
Example #2
def getEpochs(experiments, tasks):
    if experiments >= 1:
        raw = mne.io.read_raw_bdf("../data/e01.bdf")
        events = mne.find_events(raw, shortest_event=1)
        events = reduceEvents(events)
        picks = mne.pick_types(raw.info, eeg=True, stim=True, exclude='bads')
        epochs = mne.Epochs(raw,
                            events,
                            event_id,
                            tmin=0.1,
                            tmax=5,
                            proj=True,
                            picks=picks,
                            baseline=None,
                            preload=True)
    if experiments >= 2:
        raw_02 = mne.io.read_raw_bdf("../data/e02.bdf")
        events2 = mne.find_events(raw_02, shortest_event=1)
        events2 = reduceEvents(events2)
        epochs2 = mne.Epochs(raw_02,
                             events2,
                             event_id,
                             tmin=0.1,
                             tmax=5,
                             proj=True,
                             picks=picks,
                             baseline=None,
                             preload=True)
        epochs = mne.concatenate_epochs([epochs, epochs2])
    if experiments >= 3:
        raw_03 = mne.io.read_raw_bdf("../data/e03.bdf")
        events3 = mne.find_events(raw_03, shortest_event=1)
        events3 = reduceEvents(events3)
        epochs3 = mne.Epochs(raw_03,
                             events3,
                             event_id,
                             tmin=0.1,
                             tmax=5,
                             proj=True,
                             picks=picks,
                             baseline=None,
                             preload=True)
        epochs = mne.concatenate_epochs([epochs, epochs3])

    epochs.rename_channels(ch_dic)
    epochs.set_montage(montage)
    epochs.filter(7., 30., fir_design='firwin')

    if tasks:
        return epochs[tasks]
    else:
        return epochs
Example #3
def extract_epochs():
    global result_epochs, event_id
    epochs_list = []
    for file in files:
        # band pass filter
        # file.filter(0.1, 100, method='fir')
        events, desc_to_code = mne.events_from_annotations(file)

        # build event_id, keeping only the annotation descriptions that are
        # defined for the offline paradigm
        for desc in desc_to_code:
            if desc not in eventDescription_offline_paradigm:
                continue
            event_id[eventDescription_offline_paradigm[desc]] = desc_to_code[desc]

        print(f'event id: {event_id}')
        epochs = mne.Epochs(file,
                            events,
                            event_id,
                            tmin=tmin,
                            tmax=tmax,
                            baseline=None,
                            event_repeated='merge',
                            preload=True)
        epochs_list.append(epochs)

    result_epochs = mne.concatenate_epochs(epochs_list)
Example #4
def balance_epochs_violation_positions(epochs,
                                       balance_violation_standards=True):
    """
    This function balances violations and standards by position for each sequence
    :param epochs:
    :return:
    """

    epochs_balanced_allseq = []

    events_IDS = []
    for seqID in range(1, 8):

        epochs_seq = epochs['SequenceID == "' + str(seqID) +
                            '" and TrialNumber>10'].copy()
        tmp = epochs_seq['ViolationOrNot == "1"']  # Deviant trials
        devpos = np.unique(tmp.metadata.StimPosition)  # Position of deviants
        epochs_seq = epochs_seq['StimPosition == "' + str(devpos[0]) +
                                '" or StimPosition == "' + str(devpos[1]) +
                                '" or StimPosition == "' + str(devpos[2]) +
                                '" or StimPosition == "' + str(devpos[3]) +
                                '"']

        epochs_seq_noviol = epochs_seq["ViolationInSequence == 0"]
        epochs_seq_viol = epochs_seq[
            "ViolationInSequence > 0 and ViolationOrNot ==1"]

        epochs_balanced_allseq.append([epochs_seq_noviol, epochs_seq_viol])
        print('We appended the balanced epochs for SeqID%i' % seqID)

        events_IDS.append(
            [[seqID * 1000 + dev * 100, seqID * 1000 + dev * 100 + 10]
             for dev in devpos])

    epochs_balanced = mne.concatenate_epochs(
        list(np.hstack(epochs_balanced_allseq)))

    events_IDS = np.concatenate(events_IDS)

    if balance_violation_standards:
        print("we are balancing the number of standards and of violations")
        metadata_epochs = epochs_balanced.metadata
        events = [
            int(metadata_epochs['SequenceID'].values[i] * 10000 +
                metadata_epochs['StimPosition'].values[i] * 100 +
                metadata_epochs['ViolationOrNot'].values[i] * 10 +
                metadata_epochs['StimID'].values[i])
            for i in range(len(epochs_balanced))
        ]
        epochs_balanced.events[:, 2] = events
        epochs_balanced.event_id = {'%i' % i: i for i in np.unique(events)}
        epochs_balanced.equalize_event_counts(
            epochs_balanced.event_id
        )  # to train the filter, do not consider the habituation trials, so they can later be tested on separately

    return epochs_balanced
Example #5
    def gather_epoch(self, pklpath_list):
        epoch_list = []
        for pklpath in pklpath_list:
            epoch_list.append(self.read_cleandata_path(pklpath))
        return mne.concatenate_epochs(epoch_list)
Example #6
def load(avr_directory, subject, conditions):
    '''
    A loading utility idiosyncratic to this project. Navigates 'exports'
    directory structure to load files for a given subject and 
    conditions (e.g. ["funded", "unfunded"])

    Then, re-references to average before returning

    Expects a directory structure like avr_directory/subject/condition/*.avr
    '''
    data = []
    for cond_count, cond in enumerate(conditions, start=1):
        path = os.path.join(avr_directory, subject, cond)
        try:
            epochs_cond = read_epochs_avr(path,
                                          "GSN-HydroCel-129",
                                          (cond_count, cond),
                                          highpass=.3,
                                          lowpass=100)
            data.append(epochs_cond)
        except Exception:
            print("No %s files for sub-%s" % (cond, subject))
    epochs = mne.concatenate_epochs(data, add_offset=True)
    epochs.set_eeg_reference('average', projection=False)
    return epochs
Example #7
def load_to_epochs_perc(fnames, event_ids, im_times, filt):
    import mne
    import numpy as np
    import os.path as op
    #from mne.channels import make_standard_montage
    get_ipython().run_line_magic('run', 'general_tools.ipynb')
    baseline = (None, 0)

    #montage = make_standard_montage('biosemi64')

    epochs = []
    for fname in fnames:
        #fname = op.join(infolder,fname)
        raw = mne.io.read_raw_bdf(fname, preload=True).filter(filt[0],
                                                              filt[1],
                                                              method='iir')
        #raw.set_montage(montage)
        events = mne.find_events(raw,
                                 initial_event=True,
                                 consecutive=True,
                                 shortest_event=1,
                                 verbose=0)
        temp = mne.Epochs(raw,
                          events,
                          event_ids,
                          im_times[0],
                          im_times[1],
                          baseline=baseline,
                          preload=True,
                          detrend=1)
        temp = temp[100:]
        epochs.append(temp)

    epochs = mne.concatenate_epochs(epochs)
    return epochs
def decoding_analysis(c1, c2):
    # relabel the two conditions as 0/1 and rebuild event_id to match,
    # so the events stay consistent after concatenation
    c1.events[:, 2] = 0
    c2.events[:, 2] = 1
    c1.event_id = {'exp_sup_lon': 0}
    c2.event_id = {'exp_sup_sho': 1}
    epochs = mne.concatenate_epochs([c1, c2])

    # td = TimeDecoding(predict_mode='cross-validation', n_jobs=1, scorer=roc_auc_score)
    # td.fit(epochs)
    # td.score(epochs)
    # td.plot('Subject: ', c1.info['subject_info'], chance=True)

    # GAT
    y = epochs.events[:, 2]
    # y = np.zeros(len(epochs.events), dtype=int)
    # y[epochs.events[:, 2] == 90] = 1
    # NOTE: StratifiedKFold(y=...) and GeneralizationAcrossTime are legacy
    # APIs from old scikit-learn / MNE releases; a modern sketch follows below.
    cv = StratifiedKFold(y=y)  # do a stratified cross-validation

    gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1,
                                   cv=cv, scorer=roc_auc_score)

    # fit and score
    gat.fit(epochs, y=y)
    gat.score(epochs)

    # # plot
    # gat.plot(vmin=0, vmax=1)
    # gat.plot_diagonal()
    return gat
Example #9
def combine_windows_into_epochs(windows, epochs_fname='', overwrite=False):
    if op.isfile(epochs_fname) and not overwrite:
        epochs = mne.read_epochs(epochs_fname)
        return epochs
    epochs_list, info = [], None
    for window_fname in windows:
        window_name = utils.namebase(window_fname)
        evoked = mne.read_evokeds(window_fname)[0]
        if info is None:
            C, T = evoked.data.shape
            info = evoked.info
        else:
            _C, _T = evoked.data.shape
            if _C != C or _T != T:
                print('{}: dims mismatch! {} != {} or {} != {}'.format(
                    window_name, _C, C, _T, T))
                continue
        epoch = mne.EpochsArray(evoked.data.reshape((1, C, T)), evoked.info,
                                events=np.array([[0, 0, 1]]), tmin=0,
                                event_id=1)[0]
        epochs_list.append(epoch)
    epochs = mne.concatenate_epochs(epochs_list, add_offset=True)
    if epochs_fname != '':
        print('Saving epochs to {}'.format(epochs_fname))
        epochs.save(epochs_fname)
    return epochs
Example #10
def load_conds(conds, epochs_dir, include_info=False):
    # get subject files
    fnames = [f for f in os.listdir(epochs_dir) if conds in f]
    subj_nums = [re.search(r"\d+", f).group(0) for f in fnames]

    # load in data
    eeg = []
    subj_idx = []  # keeps track of which trial belongs to which subj
    trial_cts = []
    for i in range(len(subj_nums)):
        epochs = mne.read_epochs(os.path.join(epochs_dir, fnames[i]),
                                 verbose=False)
        epochs.crop(-.1, .8)
        n_trials = epochs.events.shape[0]
        subj_idx += n_trials * [subj_nums[i]]
        trial_cts.append(n_trials)
        eeg.append(epochs)
    eeg = mne.concatenate_epochs(eeg)

    # and format for classification
    X = eeg.get_data()
    y = eeg.events[:, 2]
    y = np.where(y == 2, 0, y)  # so funded/yes = 1 and unfunded/no = 0

    if include_info:
        return X, y, subj_idx, eeg.times, eeg.info
    else:
        return X, y, subj_idx, eeg.times
Example #11
def get_concat_epos(subject, exp_type):
    """Load all epochs for one experiment and return concatenated object

    Parameters
    ==========
    subject: str
        Subject directory in string form
    exp_type: str
        "motor" or "rest"

    Returns
    =======
    epochs: Epochs
    """
    if exp_type == 'motor':
        runs = cf.motor_params['runs']
    elif exp_type == 'rest':
        runs = cf.rest_params['runs']
    else:
        raise RuntimeError('Incorrect trial designation: %s' % exp_type)

    # XXX: check if proj should be false
    epo_list = []
    for run_i in runs:
        epo_fname = op.join(hcp_path, '%s' % subject, 'epochs',
                            '%s_%s_run%i-epo.fif' % (subject, exp_type, run_i))
        epo = read_epochs(epo_fname, proj=False)
        epo_list.append(epo)

    print('\nConcatenating %i epoch files' % len(epo_list))
    return concatenate_epochs(epo_list)
Example #12
def load_eeg_bci(targets=4, tmin=0, tlen=3, t_ev=0, t_sub=None, normalizer=zscore, low_f=None, high_f=None,
                 alignment=True):

    paths = [eegbci.load_data(s+1, IMAGERY_FISTS, path=str(TOPLEVEL_EEGBCI), update_path=False) for s in SUBJECTS_EEGBCI]
    raws = [mne.io.concatenate_raws([mne.io.read_raw_edf(p, preload=True) for p in path])
            for path in tqdm.tqdm(paths, unit='subj', desc='Loading')]
    datasets = OrderedDict()
    for i, raw in tqdm.tqdm(list(zip(SUBJECTS_EEGBCI, raws)), desc='Preprocessing'):
        if raw.info['sfreq'] != 160:
            tqdm.tqdm.write('Skipping..., sampling frequency: {}'.format(raw.info['sfreq']))
            continue
        raw.rename_channels(lambda x: x.strip('.'))
        if low_f or high_f:
            raw.filter(low_f, high_f, fir_design='firwin', skip_by_annotation='edge')
        events, _ = mne.events_from_annotations(raw, event_id=dict(T1=0, T2=1))
        picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude='bads')
        epochs = mne.Epochs(raw, events[:41, ...], tmin=tmin, tmax=tmin + tlen - 1 / raw.info['sfreq'], picks=picks,
                            baseline=None, reject_by_annotation=False)#.drop_bad()
        if targets > 2:
            paths = eegbci.load_data(i + 1, BASELINE_EYES_OPEN, path=str(TOPLEVEL_EEGBCI), update_path=False)
            raw = mne.io.concatenate_raws([mne.io.read_raw_edf(p, preload=True) for p in paths])
            raw.rename_channels(lambda x: x.strip('.'))
            if low_f or high_f:
                raw.filter(low_f, high_f, fir_design='firwin', skip_by_annotation='edge')
            events = np.zeros((events.shape[0] // 2, 3)).astype('int')
            events[:, -1] = 2
            events[:, 0] = np.linspace(0, raw.info['sfreq'] * (60 - 2 * tlen), num=events.shape[0]).astype(int)
            picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude='bads')
            eyes_epochs = mne.Epochs(raw, events, tmin=tmin, tmax=tmin + tlen - 1 / raw.info['sfreq'], picks=picks,
                                     baseline=None, reject_by_annotation=False)#.drop_bad()
            epochs = mne.concatenate_epochs([eyes_epochs, epochs])
        if targets > 3:
            paths = eegbci.load_data(i+1, IMAGERY_FEET_V_FISTS, path=str(TOPLEVEL_EEGBCI), update_path=False)
            raw = mne.io.concatenate_raws([mne.io.read_raw_edf(p, preload=True) for p in paths])
            raw.rename_channels(lambda x: x.strip('.'))
            if low_f or high_f:
                raw.filter(low_f, high_f, fir_design='firwin', skip_by_annotation='edge')
            events, _ = mne.events_from_annotations(raw, event_id=dict(T2=3))
            picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude='bads')
            feet_epochs = mne.Epochs(raw, events[:20, ...], tmin=tmin, tmax=tmin + tlen - 1 / raw.info['sfreq'],
                                     picks=picks, baseline=None, reject_by_annotation=False)#.drop_bad()
            epochs = mne.concatenate_epochs([epochs, feet_epochs])

        datasets[i] = EpochsDataset(epochs, preproccesors=EuclideanAlignment if alignment else [],
                                    normalizer=normalizer, runs=3)

    return datasets
def run_time_decoding(subject, condition1, condition2, session=None):
    print("Processing subject: %s (%s vs %s)"
          % (subject, condition1, condition2))

    # Construct the search path for the data file. `sub` is mandatory
    subject_path = op.join('sub-{}'.format(subject))
    # `session` is optional
    if session is not None:
        subject_path = op.join(subject_path, 'ses-{}'.format(session))

    subject_path = op.join(subject_path, config.kind)

    bids_basename = make_bids_basename(subject=subject,
                                       session=session,
                                       task=config.task,
                                       acquisition=config.acq,
                                       run=None,
                                       processing=config.proc,
                                       recording=config.rec,
                                       space=config.space
                                       )

    fpath_deriv = op.join(config.bids_root, 'derivatives',
                          config.PIPELINE_NAME, subject_path)
    fname_in = \
        op.join(fpath_deriv, bids_basename + '-epo.fif')

    epochs = mne.read_epochs(fname_in)

    # We define the epochs and the labels
    epochs = mne.concatenate_epochs([epochs[condition1],
                                     epochs[condition2]])
    epochs.apply_baseline()

    # Get the data and labels
    X = epochs.get_data()
    n_cond1 = len(epochs[condition1])
    n_cond2 = len(epochs[condition2])
    y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]

    # Use AUC because chance level is same regardless of the class balance
    se = SlidingEstimator(
        make_pipeline(StandardScaler(),
                      LogisticRegression(solver='liblinear',
                                         random_state=config.random_state)),
        scoring=config.decoding_metric, n_jobs=config.N_JOBS)
    cv = StratifiedKFold(shuffle=True,
                         random_state=config.random_state,
                         n_splits=config.decoding_n_splits)
    scores = cross_val_multiscore(se, X=X, y=y, cv=cv)

    # let's save the scores now
    a_vs_b = '%s_vs_%s' % (condition1, condition2)
    a_vs_b = a_vs_b.replace(op.sep, '')
    fname_td = op.join(config.bids_root, 'derivatives', config.PIPELINE_NAME,
                       '%s_%s_%s_%s.mat' %
                       (subject, config.study_name, a_vs_b,
                        config.decoding_metric))
    savemat(fname_td, {'scores': scores, 'times': epochs.times})
Example #14
def combine_sessions(file_list):
    tmp = []
    for f in file_list:
        full_path = os.path.join(PATH, f)
        epoch = mne.io.read_epochs_fieldtrip(full_path,
                                             info=info,
                                             data_name='data_reref',
                                             trialinfo_column=0)
        tmp.append(epoch)
    return mne.concatenate_epochs(tmp)
def run_time_decoding(subject, condition1, condition2, session=None):
    msg = f'Contrasting conditions: {condition1} – {condition2}'
    logger.info(
        gen_log_message(message=msg, step=7, subject=subject, session=session))

    fname_epochs = BIDSPath(subject=subject,
                            session=session,
                            task=config.get_task(),
                            acquisition=config.acq,
                            run=None,
                            recording=config.rec,
                            space=config.space,
                            suffix='epo',
                            extension='.fif',
                            datatype=config.get_datatype(),
                            root=config.deriv_root,
                            check=False)

    epochs = mne.read_epochs(fname_epochs)

    # We define the epochs and the labels
    epochs = mne.concatenate_epochs([epochs[condition1], epochs[condition2]])
    X = epochs.get_data()
    n_cond1 = len(epochs[condition1])
    n_cond2 = len(epochs[condition2])
    y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]

    clf = make_pipeline(
        StandardScaler(),
        LogisticRegression(solver='liblinear',
                           random_state=config.random_state))

    se = SlidingEstimator(clf,
                          scoring=config.decoding_metric,
                          n_jobs=config.N_JOBS)
    scores = cross_val_multiscore(se, X=X, y=y, cv=config.decoding_n_splits)

    # let's save the scores now
    a_vs_b = f'{condition1}-{condition2}'.replace(op.sep, '')
    processing = f'{a_vs_b}+{config.decoding_metric}'
    processing = processing.replace('_', '-').replace('-', '')

    fname_mat = fname_epochs.copy().update(suffix='decoding',
                                           processing=processing,
                                           extension='.mat')
    savemat(fname_mat, {'scores': scores, 'times': epochs.times})

    fname_tsv = fname_mat.copy().update(extension='.tsv')
    tabular_data = pd.DataFrame(
        dict(cond_1=[condition1] * len(epochs.times),
             cond_2=[condition2] * len(epochs.times),
             time=epochs.times,
             mean_crossval_score=scores.mean(axis=0),
             metric=[config.decoding_metric] * len(epochs.times)))
    tabular_data.to_csv(fname_tsv, sep='\t', index=False)
Example #16
from typing import List, Tuple


def concatenate_epochs(epoch_S1: List[mne.Epochs],
                       epoch_S2: List[mne.Epochs]
                       ) -> Tuple[mne.Epochs, mne.Epochs]:
    """
    Concatenates a list of Epochs into one Epochs object per participant.

    Arguments:
        epoch_S1: list of Epochs for participant 1 (for example the
          list samples different experimental realizations
          of the baseline condition).
        epoch_S2: list of Epochs for participant 2.
          Epochs are MNE objects.

    Returns:
        epoch_S1_concat, epoch_S2_concat: the concatenated Epochs
          (for example one Epochs object with all the experimental
          realizations of the baseline condition) for each participant.
    """
    epoch_S1_concat = mne.concatenate_epochs(epoch_S1)
    epoch_S2_concat = mne.concatenate_epochs(epoch_S2)

    return epoch_S1_concat, epoch_S2_concat
Example #17
def concatenate_epochs(epoch_S1, epoch_S2):
    """
    Concatenates a list of Epochs in one Epochs object.

    Arguments:
        epoch_S1, epoch_S2: list of Epochs for each subject (for example the
          list samples the different occurences of the baseline condition
          across experiments).
          Epochs are MNE objects (data are stored in an array of shape
          (n_epochs, n_channels, n_times) and info is a dictionnary sampling
          parameters).

    Returns:
        epoch_S1_concat, epoch_S2_concat: list of concatenate Epochs
          (for example one epoch with all the occurences of the baseline
          condition across experiments) for each subject.
    """
    epoch_S1_concat = mne.concatenate_epochs(epoch_S1)
    epoch_S2_concat = mne.concatenate_epochs(epoch_S2)

    return epoch_S1_concat, epoch_S2_concat
def _concatenate_epochs(subject, overwrite=False):
    """Concatenate epoched blocks and check that matches with behavior file."""
    epo_fname = paths('epochs', subject=subject)
    if op.exists(epo_fname) and not overwrite:
        return
    print(subject)
    epochs = list()
    for block in range(1, 6):
        this_epochs = load('epo_block', subject=subject, block=block,
                           preload=False)
        epochs.append(this_epochs)
    epochs = concatenate_epochs(epochs)
    save(epochs, 'epochs', subject=subject, overwrite=True, upload=False)
Example #19
def get_localizer(snum, reject=dict(mag=4e-12)):
    files = glob.glob('/home/nwilming/conf_meg/raw/s%02i-*.ds' % snum)
    files += glob.glob('/home/nwilming/conf_meg/raw/S%02i-*.ds' % snum)
    if snum == 14:
        files += glob.glob('/home/nwilming/conf_meg/raw/%02i-*.ds' % snum)
    epochs = [get_localizer_epochs(f) for f in files]
    epochs = [e for e in epochs if e is not None]
    dt = epochs[0].info['dev_head_t']
    for e in epochs:
        e.info['dev_head_t'] = dt
        e.reject = reject
        e.load_data()
    return mne.concatenate_epochs([e for e in epochs if len(e) > 0])
Example #20
    def leave_one_session_out(self, includes, excludes):
        # Perform leave one session out on [self.epochs_list]

        def align_epochs():
            # Align the head position transforms across [self.epochs_list]
            dev_head_t = self.epochs_list[0].info['dev_head_t']
            for epochs in self.epochs_list:
                epochs.info['dev_head_t'] = dev_head_t

        # Align epochs
        align_epochs()

        # Separate [includes] and [excludes] epochs
        include_epochs = mne.concatenate_epochs(
            [self.epochs_list[j] for j in includes])
        if len(excludes) == 0:
            exclude_epochs = None
        else:
            exclude_epochs = mne.concatenate_epochs(
                [self.epochs_list[j] for j in excludes])

        return include_epochs, exclude_epochs
def concatenate_epochs(epochs, metas):
    '''
    Concatenate a list of epoch and meta objects and set their dev_head_t projection to
    that of the first epoch.
    '''
    dev_head_t = epochs[0].info['dev_head_t']
    epoch_arrays = []
    processed_metas = []
    for e in ensure_iter(epochs):
        e.info['dev_head_t'] = dev_head_t
        e = mne.epochs.EpochsArray(e._data,
                                   e.info,
                                   events=e.events,
                                   tmin=e.tmin)
        epoch_arrays.append(e)

    if metas is not None:
        for m in ensure_iter(metas):
            processed_metas.append(m)

        return mne.concatenate_epochs(epoch_arrays), pd.concat(processed_metas)
    else:
        return mne.concatenate_epochs(epoch_arrays)
def run_time_decoding(subject, condition1, condition2, session=None):
    msg = f'Contrasting conditions: {condition1} – {condition2}'
    logger.info(
        gen_log_message(message=msg, step=8, subject=subject, session=session))

    deriv_path = config.get_subject_deriv_path(subject=subject,
                                               session=session,
                                               kind=config.get_kind())

    fname_in = BIDSPath(subject=subject,
                        session=session,
                        task=config.get_task(),
                        acquisition=config.acq,
                        run=None,
                        recording=config.rec,
                        space=config.space,
                        prefix=deriv_path,
                        kind='epo',
                        extension='.fif',
                        check=False)

    epochs = mne.read_epochs(fname_in)

    # We define the epochs and the labels
    epochs = mne.concatenate_epochs([epochs[condition1], epochs[condition2]])
    epochs.apply_baseline()

    # Get the data and labels
    X = epochs.get_data()
    n_cond1 = len(epochs[condition1])
    n_cond2 = len(epochs[condition2])
    y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]

    se = SlidingEstimator(make_pipeline(
        StandardScaler(),
        LogisticRegression(solver='liblinear',
                           random_state=config.random_state)),
                          scoring=config.decoding_metric,
                          n_jobs=config.N_JOBS)
    scores = cross_val_multiscore(se, X=X, y=y, cv=config.decoding_n_splits)

    # let's save the scores now
    a_vs_b = f'{condition1}-{condition2}'.replace(op.sep, '')
    processing = f'{a_vs_b}+{config.decoding_metric}'
    processing = processing.replace('_', '-').replace('-', '')
    fname_td = fname_in.copy().update(kind='decoding',
                                      processing=processing,
                                      extension='.mat')
    savemat(fname_td, {'scores': scores, 'times': epochs.times})
Example #23
    def append_data(self, names):
        """Append the given raw data sets."""
        files = [self.current["data"]]
        for d in self.data:
            if d["name"] in names:
                files.append(d["data"])

        names.insert(0, self.current["name"])
        if self.current["dtype"] == "raw":
            self.current["data"] = mne.concatenate_raws(files)
            self.history.append(f"mne.concatenate_raws({names})")
        elif self.current["dtype"] == "epochs":
            self.current["data"] = mne.concatenate_epochs(files)
            self.history.append(f"mne.concatenate_epochs({names})")
        self.current["name"] += " (appended)"
Example #25
def run_time_decoding(subject, condition1, condition2, session=None):
    msg = f'Contrasting conditions: {condition1} – {condition2}'
    logger.info(
        gen_log_message(message=msg, step=8, subject=subject, session=session))

    deriv_path = config.get_subject_deriv_path(subject=subject,
                                               session=session,
                                               kind=config.get_kind())

    bids_basename = make_bids_basename(subject=subject,
                                       session=session,
                                       task=config.get_task(),
                                       acquisition=config.acq,
                                       run=None,
                                       processing=config.proc,
                                       recording=config.rec,
                                       space=config.space)

    fname_in = op.join(deriv_path, bids_basename + '-epo.fif')
    epochs = mne.read_epochs(fname_in)

    # We define the epochs and the labels
    epochs = mne.concatenate_epochs([epochs[condition1], epochs[condition2]])
    epochs.apply_baseline()

    # Get the data and labels
    X = epochs.get_data()
    n_cond1 = len(epochs[condition1])
    n_cond2 = len(epochs[condition2])
    y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]

    # Use AUC because chance level is same regardless of the class balance
    se = SlidingEstimator(make_pipeline(
        StandardScaler(),
        LogisticRegression(solver='liblinear',
                           random_state=config.random_state)),
                          scoring=config.decoding_metric,
                          n_jobs=config.N_JOBS)
    scores = cross_val_multiscore(se, X=X, y=y, cv=config.decoding_n_splits)

    # let's save the scores now
    a_vs_b = '%s_vs_%s' % (condition1, condition2)
    a_vs_b = a_vs_b.replace(op.sep, '')
    fname_td = op.join(
        config.bids_root, 'derivatives', config.PIPELINE_NAME,
        '%s_%s_%s_%s.mat' %
        (subject, config.study_name, a_vs_b, config.decoding_metric))
    savemat(fname_td, {'scores': scores, 'times': epochs.times})
Example #26
def epochs(ftype, tasktype, **kwargs):

    for rawlist, eventlist in \
        zip(raws(ftype, tasktype),
            events(ftype, tasktype)):

        epochlist = []
        for raw, eventarray in zip(rawlist, eventlist):
            epochlist.append(mne.Epochs(raw, eventarray, **kwargs))
        print(raw.info['subject_info'])
        # ensure the bads are the same
        bads = list(set(sum((e.info['bads'] for e in epochlist), [])))
        for epoch in epochlist:
            epoch.info['bads'] = bads

        # concatenate the two epoch structs since they are equivalent
        yield mne.concatenate_epochs(epochlist)
def getAllEpochsForSub(labelStruct, sub, winSize):
    subTable = labelStruct[sub - 1]
    del labelStruct
    nFiles = len(subTable)
    epochsList = list()
    labelsList = list()

    for k in range(nFiles):
        epochs, labels = makeMyEpochs(subTable, sub, k, winSize)
        labelsList.extend(labels)
        epochsList.append(epochs)


    return mne.concatenate_epochs(epochsList,
                                  add_offset=True), np.asarray(labelsList)
def epochs(ftype, tasktype, **kwargs):

    for rawlist, eventlist in zip(
            data.eeg.raws(ftype, tasktype),
            data.eeg.events(ftype, tasktype)):
        pid = rawlist[0].info['subject_info']
        epochlist = []
        for raw, event in zip(rawlist, eventlist):
            duration = ((event[0, (event[:, 2] > 100) & (event[:, 2] < 110)] -
                         event[0, (event[:, 2] > 80) & (event[:, 2] < 90)]) /
                        raw.info['sfreq'])[0]

            epochlist.append(mne.Epochs(raw, event, event_id=[81, 82, 83, 84],
                                        tmax=duration, on_missing='ignore',
                                        **kwargs))

        yield mne.concatenate_epochs(epochlist)
Example #29
def get_session_erp_epochs(session_datas, markers_const, tmin=-0.1, tmax=0.8):
    # map each marker label to an integer event code, starting at 1
    event_id = {marker: i for i, marker in enumerate(markers_const, start=1)}
    epochs_list = []
    for session_data in session_datas:
        df = get_session_df(session_data)
        channels = df.columns.tolist()
        n_channel = len(channels)

        info = create_info(ch_names=channels + ["stim"],
                           ch_types=["eeg"] * n_channel + ["stim"],
                           sfreq=samplingRate)

        markers = get_markers(session_data)

        df["stim"] = [0] * len(df)
        for marker in markers:
            marker_timestamp = marker["timestamp"]
            pandas_timestamp = df.index[df.index.get_loc(marker_timestamp,
                                                         method='nearest')]
            if marker["label"] in markers_const:
                df.at[pandas_timestamp, "stim"] = event_id[marker["label"]]

        nparr = df.to_numpy().T
        raw = RawArray(data=nparr, info=info, verbose=False)

        events = find_events(raw)

        # Create an MNE Epochs object representing all the epochs around stimulus presentation
        epochs = Epochs(raw,
                        events=events,
                        event_id=event_id,
                        tmin=tmin,
                        tmax=tmax,
                        baseline=None,
                        preload=True,
                        verbose=False)
        print('sample drop %: ', (1 - len(epochs.events) / len(events)) * 100)
        epochs_list.append(epochs)
    concat_epochs = concatenate_epochs(epochs_list)

    return concat_epochs
def get_data():
    '''
    Returns a single MNE epochs object and associated meta data for both
    data raw data files.
    '''
    ea = get_epochs(
        '/home/pmurphy/Decoding_tests/meg_data/DC1_TimeScale_20170201_01.ds')
    ea2 = get_epochs(
        '/home/pmurphy/Decoding_tests/meg_data/DC1_TimeScale_20170201_02.ds')
    # Treat both files as if head was in the same position.
    ea2.info['dev_head_t'] = ea.info['dev_head_t']
    # Align with metadata
    ea, df = align(ea, epoch_offset=10)
    ea2, df2 = align(ea2, epoch_offset=0, block_mapping=block_mapping_2)
    df2 = df2.set_index(2025 + np.arange(len(df2)))
    meta = pd.concat([df, df2])
    epochs = mne.concatenate_epochs([ea, ea2])
    return epochs, meta
Example #31
def randEpochSeqNInitTrainData(oEpochs: mne.Epochs, min_trials: int,
                               epochLen_s: int,
                               eventId_dict: dict) -> mne.Epochs:
    event_name = list(eventId_dict.keys())
    print(event_name)
    if len(event_name) % 2:  # odd number of event names
        epoch_eventCls1Name_list = event_name[0:len(event_name) // 2 + 1]
        epoch_eventCls2Name_list = event_name[len(event_name) // 2 + 1:]
        print("randEpochSeqNInitTrainData warning: odd number of event names")
    else:
        epoch_eventCls1Name_list = event_name[0:len(event_name) // 2]
        epoch_eventCls2Name_list = event_name[len(event_name) // 2:]
    epoch_eventCls1_list = getEpochBasedOnNames(oEpochs,
                                                epoch_eventCls1Name_list)
    epoch_eventCls2_list = getEpochBasedOnNames(oEpochs,
                                                epoch_eventCls2Name_list)

    epoch_subEventCls1_list = getSubEpochBasedOnSegLen(epoch_eventCls1_list,
                                                       oEpochs.tmax,
                                                       epochLen_s)
    epoch_subEventCls2_list = getSubEpochBasedOnSegLen(epoch_eventCls2_list,
                                                       oEpochs.tmax,
                                                       epochLen_s)
    shuffle(epoch_subEventCls1_list)
    shuffle(epoch_subEventCls2_list)

    trainEpochs_list = epoch_subEventCls1_list[
        0:min_trials] + epoch_subEventCls2_list[0:min_trials]
    testEpochs_list = epoch_subEventCls1_list[
        min_trials:] + epoch_subEventCls2_list[min_trials:]
    shuffle(testEpochs_list)

    finalList = trainEpochs_list + testEpochs_list
    return concatenate_epochs(finalList)
def run_time_decoding(subject_id, condition1, condition2):
    subject = "sub%03d" % subject_id
    data_path = os.path.join(meg_dir, subject)
    epochs = mne.read_epochs(os.path.join(data_path, '%s-epo.fif' % subject))

    # We define the epochs and the labels
    n_cond1 = len(epochs[condition1])
    n_cond2 = len(epochs[condition2])
    y = np.r_[np.ones((n_cond1, )), np.zeros((n_cond2, ))]
    epochs = mne.concatenate_epochs([epochs[condition1],
                                    epochs[condition2]])
    epochs.apply_baseline()

    # Let us restrict ourselves to the occipital channels
    from mne.selection import read_selection
    ch_names = [ch_name.replace(' ', '') for ch_name
                in read_selection('occipital')]
    epochs.pick_types(meg='mag').pick_channels(ch_names)

    # Now we fit and plot the time decoder
    # (TimeDecoding is a legacy API from old MNE releases; modern code would
    # use mne.decoding.SlidingEstimator instead)
    from mne.decoding import TimeDecoding

    times = dict(step=0.005)  # fit a classifier only every 5 ms
    # Use AUC because chance level is same regardless of the class balance
    td = TimeDecoding(predict_mode='cross-validation',
                      times=times, scorer='roc_auc')
    td.fit(epochs, y)

    # let's save the scores now
    a_vs_b = '%s_vs_%s' % (os.path.basename(condition1),
                           os.path.basename(condition2))
    fname_td = os.path.join(data_path, '%s-td-auc-%s.mat'
                            % (subject, a_vs_b))
    from scipy.io import savemat
    savemat(fname_td, {'scores': td.score(epochs),
                       'times': td.times_['times']})
# default. To turn off rejection by bad segments (as was done earlier with
# saccades) you can use keyword ``reject_by_annotation=False``.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                       exclude='bads')

epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=reject, preload=False,
                    proj=True)

###############################################################################
# We only use first 40 good epochs from each run. Since we first drop the bad
# epochs, the indices of the epochs are no longer same as in the original
# epochs collection. Investigation of the event timings reveals that first
# epoch from the second run corresponds to index 182.
epochs.drop_bad()
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
                                          epochs['standard'][182:222]])
epochs_standard.load_data()
epochs_standard.resample(600, npad='auto')  # resample to save memory
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs, picks

###############################################################################
# The averages for each conditions are computed.
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant

###############################################################################
# Typical preprocessing step is the removal of power line artifact (50 Hz or
# 60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the
# 60 Hz artifact and its harmonics.
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 12:58:38 2017

@author: ning
"""

working_dir = 'D:\\Epochs\\'
import os
os.chdir(working_dir)
import mne
import numpy as np
epochs = mne.concatenate_epochs(
    [mne.read_epochs('Ex10_Suj19_Run%d-epo.fif' % ii) for ii in range(1, 5)])
old = epochs['after'].average()
new_new = epochs['new'].average()
new_old = epochs['before'].average()
scramble = epochs['scramble'].average()
mne.combine_evoked([old, -new_old], weights='equal').plot_joint(times=[0, .4, .8, 1.2], title='old vs [11,21,31]')
mne.combine_evoked([old, -new_new], weights='equal').plot_joint(times=[0, .4, .8, 1.2], title='old vs new')
mne.combine_evoked([old, -scramble], weights='equal').plot_joint(times=[0, .4, .8, 1.2], title='old vs scramble')
old.pick_channels(['PO8']).plot(titles='old')
new_new.pick_channels(['PO8']).plot(titles='new')
# Crop and downsample to make it faster
epochs.crop(None, tmax=1)
epochs.pick_types(meg="grad")

epochs_clt_left = epochs["ctl/left"].copy()
epochs_clt_right = epochs["ctl/right"].copy()

del epochs

epochs_clt_left.events[:, 2] = 0
epochs_clt_right.events[:, 2] = 1

epochs_clt_left.event_id = {"0": 0}
epochs_clt_right.event_id = {"1": 1}

epochs_data = mne.concatenate_epochs([epochs_clt_left, epochs_clt_right])

# Equalise the number of epochs in each condition
epochs_data.equalize_event_counts(["0", "1"])

# Classifier
clf = make_pipeline(StandardScaler(), LogisticRegression(C=1))

# Set up the GAT object (y is taken from the event codes assigned above)
gat = GeneralizationAcrossTime(
    predict_mode='mean-prediction', scorer="roc_auc", n_jobs=1)

# Fit model
print("fitting GAT")
gat.fit(epochs_data)
def _get_epochs(subject):
    # if already computed, lets load it from disk
    epo_fname = paths('epochs_vhp', subject=subject)
    if op.exists(epo_fname):
        return load('epochs_vhp', subject=subject, preload=True)

    # high pass filter and epoch
    for block in range(1, 6):

        raw = load('sss', subject=subject, block=block, preload=True)

        # Explicit picking of channel to ensure same channels across subjects
        picks = ['STI101', 'EEG060', 'EOG061', 'EOG062', 'ECG063', 'EEG064',
                 'MISC004']

        # Potentially add forgotten channels
        ch_type = dict(STI='stim', EEG='eeg', EOG='eog', ECG='ecg',
                       MIS='misc')
        missing_chans = list()
        for channel in picks:
            if channel not in raw.ch_names:
                missing_chans.append(channel)
        if missing_chans:
            info = create_info(missing_chans, raw.info['sfreq'],
                               [ch_type[ch[:3]] for ch in missing_chans])
            raw.add_channels([RawArray(
                np.zeros((len(missing_chans), raw.n_times)), info,
                raw.first_samp)], force_update_info=True)

        # Select same channels order across subjects
        picks = [np.where(np.array(raw.ch_names) == ch)[0][0] for ch in picks]
        picks = np.r_[np.arange(306), picks]

        # Filtered
        raw.filter(2, 30, l_trans_bandwidth=.5, filter_length='30s',
                   n_jobs=1)

        # Ensure same sampling rate
        if raw.info['sfreq'] != 1000.0:
            raw.resample(1000.0)

        # Select events
        events = find_events(raw, stim_channel='STI101', shortest_event=1)
        sel = np.where(events[:, 2] <= 255)[0]
        events = events[sel, :]

        # Compensate for the trigger-to-display delay (as measured manually
        # with a photodiode)
        events[:, 0] += int(.050 * raw.info['sfreq'])

        # Epoch continuous data
        this_epochs = Epochs(raw, events, reject=None, tmin=-.200, tmax=1.6,
                             picks=picks, baseline=None, decim=10)
        save(this_epochs, 'epo_block', subject=subject, block=block)
        this_epochs._data = None
        raw.data = None
        del this_epochs, raw

    epochs = list()
    for block in range(1, 6):
        this_epochs = load('epo_block', subject=subject, block=block)
        epochs.append(this_epochs)
    epochs = concatenate_epochs(epochs)

    # save for faster retest
    save(epochs, 'epochs_vhp', subject=subject, overwrite=True, upload=False)

    return epochs
    subject))
epochs_plan = mne.read_epochs(epochs_folder + "%s_plan_ar-epo.fif" % (subject))

# Fix the events for the plan epochs so they can be concatenated:
# relabel "press" as "plan" with event code 2
epochs_plan.event_id.pop("press")
epochs_plan.event_id["plan"] = 2
epochs_plan.events[:, 2] = 2

# Equalise channels and epochs, and concatenate epochs
mne.equalize_channels([epochs_classic, epochs_plan])
mne.epochs.equalize_epoch_counts([epochs_classic, epochs_plan])

# Dirty hack # TODO: Check this from the Maxfilter side
# epochs_classic.info['dev_head_t'] = epochs_plan.info['dev_head_t']

epochs = mne.concatenate_epochs([epochs_classic, epochs_plan])

# Crop and downsample to make it faster
epochs.crop(tmin=-3.5, tmax=0)
epochs.resample(250)

# Setup the y vector and GAT
y = np.concatenate(
    (np.zeros(len(epochs["press"])), np.ones(len(epochs["plan"]))))
gat = GeneralizationAcrossTime(
    predict_mode='mean-prediction', scorer="roc_auc", n_jobs=1)

# Fit model
gat.fit(epochs, y=y)

# Scoring and visualise result
gat.score(epochs, y=y)