def using_preprocessed_epochs(all_events, eventss, baseline, hcp_params):
    """Build per-condition evokeds from the preprocessed HCP epochs.

    Parameters
    ----------
    all_events : list of ndarray
        One MNE-style ``(n_events, 3)`` event array per run.
    eventss : dict
        Mapping of event name -> integer event code.
    baseline : tuple | None
        Baseline passed on to ``Evoked.apply_baseline``.
    hcp_params : dict
        Keyword arguments (``subject``, ``data_type``, ``hcp_path``, ...)
        forwarded to the ``hcp`` readers.

    Returns
    -------
    epochs_hcp : mne.Epochs
        The re-labelled epochs of the first run.
    evokeds_from_epochs_hcp : defaultdict(list)
        One averaged, channel-interpolated Evoked per event name.
    """
    from collections import defaultdict
    evokeds_from_epochs_hcp = defaultdict(list)

    for run_index, events in zip([0, 1], all_events):
        # HCP event tables can repeat time samples; keep only the first
        # occurrence of each sample (a zero diff marks a repeat).
        unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]

        epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)
        # Restrict to the event codes we know about, then re-attach the
        # MNE event codes and ids to the HCP epochs object.
        subset = np.in1d(events[:, 2], list(eventss.values()))

        epochs_hcp = epochs_hcp[unique_subset][subset]
        epochs_hcp.events[:, 2] = events[subset, 2]
        epochs_hcp.event_id = eventss
        # NOTE(review): only the first run is processed (the original
        # example breaks deliberately); drop this break to handle run 1.
        break

    for event_name, event_id in eventss.items():
        evoked = epochs_hcp[event_name].average()
        evoked.baseline = baseline
        evoked.apply_baseline()
        # The HCP epochs miss some channels; interpolate them back so the
        # evokeds are comparable across runs and subjects.
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds_from_epochs_hcp[event_name].append(evoked)

    return epochs_hcp, evokeds_from_epochs_hcp
# --- Beispiel #2 (pastebin example separator; not code) ---
def using_preprocessed_epochs(all_events, events_ids):
    """Build per-condition evokeds from the preprocessed HCP epochs.

    NOTE(review): this variant reads ``baseline`` and ``hcp_params`` as
    module-level globals rather than taking them as parameters — confirm
    both are defined before calling.

    Parameters
    ----------
    all_events : list of ndarray
        One MNE-style ``(n_events, 3)`` event array per run.
    events_ids : dict
        Mapping of event name -> integer event code.

    Returns
    -------
    epochs_hcp : mne.Epochs
        The re-labelled epochs of the first run (only run 0 is processed
        because of the ``break`` below).
    evokeds_from_epochs_hcp : defaultdict(list)
        One averaged, channel-interpolated Evoked per event name.
    """
    from collections import defaultdict
    evokeds_from_epochs_hcp = defaultdict(list)
    all_epochs_hcp = list()  # unused: the per-run append below is commented out

    for run_index, events in zip([0, 1], all_events):

        # HCP event tables can repeat time samples; a zero diff marks a
        # repeat, so this keeps only the first occurrence of each sample.
        unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
        # use diff to find first unique events
        # this_events = events[unique_subset]

        epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)
        # subset epochs, add events and id
        subset = np.in1d(events[:, 2], list(events_ids.values()))

        epochs_hcp = epochs_hcp[unique_subset][subset]
        epochs_hcp.events[:, 2] = events[subset, 2]
        epochs_hcp.event_id = events_ids
        # only the first run is processed; drop this break to handle run 1
        break
        # all_epochs_hcp.append(epochs_hcp)

    # del epochs_hcp
    # These epochs have different channels.
    # We use a designated function to re-apply the channels and interpolate
    # them.
    for event_name, event_id in events_ids.items():
        evoked = epochs_hcp[event_name].average()
        evoked.baseline = baseline
        evoked.apply_baseline()
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds_from_epochs_hcp[event_name].append(evoked)

    return epochs_hcp, evokeds_from_epochs_hcp
def test_interpolate_missing():
    """Test interpolation of missing channels."""
    data_type = 'task_working_memory'
    # Use the data_type variable consistently (the literal was duplicated
    # in the original call).
    raw = hcp.read_raw(data_type=data_type, run_index=0,
                       **hcp_params)
    raw.load_data()
    n_chan = len(raw.ch_names)
    # Dropping a channel must reduce the count by exactly one ...
    raw.drop_channels(['A1'])
    assert_equal(len(raw.ch_names), n_chan - 1)
    # ... and interpolation must restore it.
    raw = interpolate_missing(raw, data_type=data_type, **hcp_params)
    assert_equal(len(raw.ch_names), n_chan)

    # HCP evokeds ship with 243 of the 248 BTi channels; interpolation
    # adds the missing ones back.
    evoked = hcp.read_evokeds(data_type=data_type, **hcp_params)[0]
    assert_equal(len(evoked.ch_names), 243)
    evoked_int = interpolate_missing(evoked, data_type=data_type, **hcp_params)
    assert_equal(len(evoked_int.ch_names), 248)
def test_interpolate_missing():
    """Test interpolation of missing channels."""
    data_type = 'task_working_memory'

    # Raw round-trip: drop one channel, interpolate it back.
    raw = hcp.read_raw(data_type='task_working_memory',
                       run_index=0,
                       **hcp_params)
    raw.load_data()
    n_channels_before = len(raw.ch_names)
    raw.drop_channels(['A1'])
    assert_equal(len(raw.ch_names), n_channels_before - 1)
    raw = interpolate_missing(raw, data_type=data_type, **hcp_params)
    assert_equal(len(raw.ch_names), n_channels_before)

    # Evoked: the HCP files carry 243 channels; after interpolation the
    # full 248-channel layout is present.
    evoked = hcp.read_evokeds(data_type=data_type, **hcp_params)[0]
    assert_equal(len(evoked.ch_names), 243)
    interpolated = interpolate_missing(evoked,
                                       data_type=data_type,
                                       **hcp_params)
    assert_equal(len(interpolated.ch_names), 248)
# --- Beispiel #5 (pastebin example separator; not code) ---
def official_ERF():
    """Return the official HCP working-memory face ERF.

    Reads the HCP pipeline evokeds (reads ``hcp_params`` from module
    scope), selects the 'Wrkmem_LM-TIM-face_BT-diff_MODE-mag' contrast and
    interpolates the missing channels.

    Returns
    -------
    evoked_hcp : mne.Evoked
        The channel-interpolated official ERF.
    """
    hcp_evokeds = hcp.read_evokeds(onset='stim', **hcp_params)

    for ev in hcp_evokeds:
        # BUG FIX: the original loop only `continue`d past non-matches and
        # never stopped, so `ev` always ended up being the *last* evoked in
        # the list. Stop as soon as the wanted comment is found.
        if ev.comment == 'Wrkmem_LM-TIM-face_BT-diff_MODE-mag':
            break

    # Once more we add and interpolate missing channels
    evoked_hcp = preproc.interpolate_missing(ev, **hcp_params)
    return evoked_hcp
# and set the IO params

# Root of the local mne-hcp data tree (assumes the default layout used by
# the mne-hcp examples: ~/mne-hcp-data/HCP/<subject>/...).
storage_dir = op.expanduser('~')

hcp_params = dict(hcp_path=op.join(storage_dir, 'mne-hcp-data', 'HCP'),
                  subject='105923',
                  data_type='task_working_memory')

##############################################################################
# we take one evoked and create an interpolated copy

evoked = hcp.read_evokeds(**hcp_params)[0]

# The HCP pipelines don't interpolate missing channels
print('%i channels out of 248 expected' % len(evoked.ch_names))

evoked_interpolated = preproc.interpolate_missing(evoked, **hcp_params)

##############################################################################
# Let's visualize what has changed!

# we calculate the difference ... (channel names present only after
# interpolation, i.e. the channels that were missing in the HCP file)
bads = set(evoked_interpolated.ch_names) - set(evoked.ch_names)
print(bads)

# ... and mark the respective channels as bad ...
evoked_interpolated.info['bads'] += list(bads)

# ... such that MNE is displaying the interpolated time series in red ...
evoked_interpolated.plot(exclude=[])
# --- Beispiel #7 (pastebin example separator; not code) ---
def reprocess_the_data_from_scratch(all_events, event_id, tmin, tmax, baseline,
                                    decim):
    """Re-run HCP-style preprocessing from the raw data and return ERFs.

    Reads ``hcp_params`` from module scope.

    Parameters
    ----------
    all_events : list of ndarray
        One MNE-style ``(n_events, 3)`` event array per run.
    event_id : dict
        Mapping of event name -> integer event code for epoching.
    tmin, tmax : float
        Epoch window in seconds.
    baseline : tuple | None
        Baseline passed to ``mne.Epochs``.
    decim : int
        Decimation factor passed to ``mne.Epochs``.

    Returns
    -------
    all_epochs : list of mne.Epochs
        One Epochs object per run.
    evokeds : list of mne.Evoked
        One channel-interpolated Evoked per run.
    """
    # now we can go ahead
    evokeds = list()
    all_epochs = list()
    for run_index, events in zip([0, 1], all_events):

        raw = hcp.read_raw(run_index=run_index, **hcp_params)
        raw.load_data()
        # apply ref channel correction and drop ref channels
        # preproc.apply_ref_correction(raw)

        annots = hcp.read_annot(run_index=run_index, **hcp_params)
        # construct MNE annotations: HCP stores bad segments in samples,
        # MNE annotations want seconds.
        bad_seg = (annots['segments']['all']) / raw.info['sfreq']
        annotations = mne.Annotations(bad_seg[:, 0],
                                      (bad_seg[:, 1] - bad_seg[:, 0]),
                                      description='bad')

        # NOTE(review): direct assignment is deprecated/removed in recent
        # MNE; use raw.set_annotations(annotations) there.
        raw.annotations = annotations
        raw.info['bads'].extend(annots['channels']['all'])
        raw.pick_types(meg=True, ref_meg=False)

        #  Note: MNE complains on Python 2.7
        raw.filter(0.50,
                   None,
                   method='iir',
                   iir_params=dict(order=4, ftype='butter'),
                   n_jobs=1)
        raw.filter(None,
                   60,
                   method='iir',
                   iir_params=dict(order=4, ftype='butter'),
                   n_jobs=1)

        # read ICA and remove EOG ECG
        # note that the HCP ICA assumes that bad channels have already been removed
        ica_mat = hcp.read_ica(run_index=run_index, **hcp_params)

        # We will select the brain ICs only
        exclude = annots['ica']['ecg_eog_ic']
        preproc.apply_ica_hcp(raw, ica_mat=ica_mat, exclude=exclude)

        # now we can epoch
        # BUG FIX: np.sort(events, 0) sorts every *column* independently,
        # which destroys the sample <-> event-code pairing of each row.
        # Sort whole rows by their time sample instead.
        events = events[events[:, 0].argsort()]
        epochs = mne.Epochs(raw,
                            events=events[events[:, 2] == 1],
                            event_id=event_id,
                            tmin=tmin,
                            tmax=tmax,
                            reject=None,
                            baseline=baseline,
                            decim=decim,
                            preload=True)

        all_epochs.append(epochs)
        evoked = epochs.average()
        # now we need to add back out channels for comparison across runs.
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds.append(evoked)
    return all_epochs, evokeds
    # NOTE(review): the original file contained ~18 lines of unreachable
    # scrape residue after this return; they have been removed.

##############################################################################
# Now we can compute the same ERF based on the preprocessed epochs
#
# These are obtained from the 'tmegpreproc' pipeline.
# Things are pythonized and simplified however, so

# Collect one ERF per run, built from the preprocessed HCP epochs.
evokeds_from_epochs_hcp = list()

for run_index, events in zip([0, 1], all_events):

    # HCP event tables may repeat time samples; a zero diff marks a
    # repeat, so this keeps only the first occurrence of each sample.
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    # use diff to find first unique events
    # --- Beispiel #9 (pastebin example separator; not code) ---
    events = np.c_[trial_info['stim']['codes'][:, 6] - 1,  # time sample
                   np.zeros(len(trial_info['stim']['codes'])),
                   trial_info['stim']['codes'][:, 3]  # event codes
                   ].astype(int)

    # for some reason in the HCP data the time events may not always be unique
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    events = events[unique_subset]  # use diff to find first unique events
    subset = np.in1d(events[:, 2], event_id.values())
    epochs_hcp = hcp.read_epochs(**hcp_params).decimate(decim)
    epochs_hcp = epochs_hcp[unique_subset][subset]
    epochs_hcp.events[:, 2] = events[subset, 2]
    epochs_hcp.event_id = event_id
    epochs_hcp.crop(-0.1, 0.5)
    epochs.append(preproc.interpolate_missing(epochs_hcp, **hcp_params))

epochs = mne.concatenate_epochs(epochs)
del epochs_hcp

##############################################################################
# Now we can proceed as shown in the MNE-Python decoding tutorials

y = LabelBinarizer().fit_transform(epochs.events[:, 2]).ravel()

cv = StratifiedKFold(y=y)  # do a stratified cross-validation

gat = GeneralizationAcrossTime(predict_mode='cross-validation',
                               n_jobs=1,
                               cv=cv,
                               scorer=roc_auc_score)