def using_preprocessed_epochs(all_events, eventss, baseline, hcp_params):
    """Build baseline-corrected evokeds from the HCP preprocessed epochs.

    Parameters
    ----------
    all_events : list of ndarray
        One MNE-style events array of shape (n_events, 3) per run.
    eventss : dict
        Mapping of event name -> event code to keep.
    baseline : tuple
        Baseline interval applied to each evoked response.
    hcp_params : dict
        Keyword arguments forwarded to ``hcp.read_epochs`` and
        ``preproc.interpolate_missing`` (subject, data_type, hcp_path, ...).

    Returns
    -------
    epochs_hcp : mne.Epochs
        The subset epochs of the first run (only run 0 is processed).
    evokeds_from_epochs_hcp : collections.defaultdict of list
        Baseline-corrected, channel-interpolated evokeds keyed by event name.
    """
    from collections import defaultdict
    evokeds_from_epochs_hcp = defaultdict(list)

    for run_index, events in zip([0, 1], all_events):

        # HCP trigger samples are not always unique; keep only the first
        # occurrence of each time sample (diff == 0 marks a duplicate).
        unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]

        epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)
        # Keep only epochs whose event code belongs to our event scheme.
        subset = np.in1d(events[:, 2], list(eventss.values()))

        epochs_hcp = epochs_hcp[unique_subset][subset]
        epochs_hcp.events[:, 2] = events[subset, 2]
        epochs_hcp.event_id = eventss
        # NOTE: only the first run is used — intentional early exit.
        break

    for event_name, event_id in eventss.items():
        evoked = epochs_hcp[event_name].average()
        evoked.baseline = baseline
        evoked.apply_baseline()
        # The preprocessed epochs miss some channels; re-apply and
        # interpolate them so all evokeds share a common channel set.
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds_from_epochs_hcp[event_name].append(evoked)

    return epochs_hcp, evokeds_from_epochs_hcp
Beispiel #2
0
def using_preprocessed_epochs(all_events, events_ids):
    """Build baseline-corrected evokeds from the HCP preprocessed epochs.

    NOTE(review): this variant reads ``baseline`` and ``hcp_params`` as
    free (module-level) variables — confirm they are defined at call time.

    Parameters
    ----------
    all_events : list of ndarray
        One MNE-style events array of shape (n_events, 3) per run.
    events_ids : dict
        Mapping of event name -> event code to keep.

    Returns
    -------
    epochs_hcp : mne.Epochs
        The subset epochs of the first run (only run 0 is processed).
    evokeds_from_epochs_hcp : collections.defaultdict of list
        Baseline-corrected, channel-interpolated evokeds keyed by event name.
    """
    from collections import defaultdict
    evokeds_from_epochs_hcp = defaultdict(list)

    for run_index, events in zip([0, 1], all_events):

        # HCP trigger samples are not always unique; keep only the first
        # occurrence of each time sample (diff == 0 marks a duplicate).
        unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]

        epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)
        # Keep only epochs whose event code belongs to our event scheme.
        subset = np.in1d(events[:, 2], list(events_ids.values()))

        epochs_hcp = epochs_hcp[unique_subset][subset]
        epochs_hcp.events[:, 2] = events[subset, 2]
        epochs_hcp.event_id = events_ids
        # NOTE: only the first run is used — intentional early exit.
        break

    # These epochs have different channels.
    # We use a designated function to re-apply the channels and interpolate
    # them.
    for event_name, event_id in events_ids.items():
        evoked = epochs_hcp[event_name].average()
        evoked.baseline = baseline
        evoked.apply_baseline()
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds_from_epochs_hcp[event_name].append(evoked)

    return epochs_hcp, evokeds_from_epochs_hcp
def analyze_rest(subject, args, hcp_params, run_index=0, calc_rest_from_raw=False, calc_rest_from_epochs=True):
    """Run the resting-state analysis pipeline for one HCP subject.

    Reads (or restores cached) raw data and epochs, computes the forward and
    inverse solutions, then calculates label time courses either from the
    raw data or from the epochs.

    Parameters
    ----------
    subject : str
        HCP subject identifier.
    args : namespace
        Pipeline arguments (snr, atlas, inverse_method, ...); ``args.snr``
        is overridden here.
    hcp_params : dict
        Keyword arguments forwarded to ``hcp.read_epochs``.
    run_index : int
        Which HCP run to analyze.
    calc_rest_from_raw : bool
        Compute label averages directly from the raw recording.
    calc_rest_from_epochs : bool
        Compute per-condition source estimates from the epochs.

    Returns
    -------
    dict
        Flags accumulated by the pipeline steps (empty if neither
        calculation branch ran).
    """
    flags = {}
    # Cache the raw recording on first use.
    if not op.isfile(meg.RAW):
        raw = read_raw_data(run_index, hcp_params, 1, 60)
        raw.save(meg.RAW)
    else:
        raw = mne.io.read_raw_fif(meg.RAW)
    meg.COR = op.join(HCP_DIR, 'hcp-meg', subject, '{}-head_mri-trans.fif'.format(subject))
    epo_fname = meg.EPO.format(cond='all')
    evo_fname = meg.EVO.format(cond='all')
    # Cache epochs / evoked on first use.
    if not op.isfile(epo_fname) or not op.isfile(evo_fname):
        epochs = hcp.read_epochs(run_index=run_index, **hcp_params)
        evoked = epochs.average()
        epochs.save(epo_fname)
        mne.write_evokeds(evo_fname, evoked)
    else:
        epochs = mne.read_epochs(epo_fname)
    meg.calc_fwd_inv_wrapper(subject, args)
    args.snr = 1.0  # use smaller SNR for raw data
    if calc_rest_from_raw:
        meg.calc_labels_avg_for_rest_wrapper(args, raw)
    elif calc_rest_from_epochs:
        args.single_trial_stc = True
        flags, stcs_conds, stcs_num = meg.calc_stc_per_condition_wrapper(
            subject, None, args.inverse_method, args, flags, None, epochs)
        flags = meg.calc_labels_avg_per_condition_wrapper(
            subject, None, args.atlas, args.inverse_method, stcs_conds, args, flags, stcs_num, raw, epochs)

    print('analyze_rest done for subject {}'.format(subject))
    return flags
def test_read_epochs_rest():
    """Test reading epochs for resting state"""
    run_indices = tconf.run_inds[:tconf.max_runs][:2]
    for run_index in run_indices:
        # Annotations and epochs are read with the same parameters.
        read_kwargs = dict(data_type='rest', run_index=run_index,
                           **hcp_params)
        annots = hcp.read_annot(**read_kwargs)
        epochs = hcp.read_epochs(**read_kwargs)
        _epochs_basic_checks(epochs, annots, data_type='rest')
def test_read_epochs_task():
    """Test reading epochs for task"""
    run_indices = tconf.run_inds[:tconf.max_runs][:2]
    for run_index in run_indices:
        for data_type in tconf.task_types:
            # Annotations and epochs are read with the same parameters.
            read_kwargs = dict(data_type=data_type, run_index=run_index,
                               **hcp_params)
            annots = hcp.read_annot(**read_kwargs)
            epochs = hcp.read_epochs(**read_kwargs)
            _epochs_basic_checks(epochs, annots, data_type)
Beispiel #6
0
f_type = 'Motort'  # HCP task folder name: 'Wrkmem' or 'Motort'
run_index = 0
onset = 'stim'  # epoch onset definition (stimulus-locked)
# scan subjects
for subject in os.listdir(hcp_path):
    # skip subjects that have no data folder for this task type
    fname = os.path.join(hcp_path, subject, 'MEG', f_type)
    if not os.path.exists(fname):
        continue

    # skip subjects whose wPLI output already exists
    data_path2save = os.path.join(storage_dir, 'wpli', subject, f_type, onset)
    if os.path.exists(data_path2save):
        continue
    # let's get the epochs data
    # NOTE(review): `data_type` is not defined anywhere in this view -- it
    # likely should be `f_type` or be set earlier; confirm before running.
    hcp_epochs = hcp.read_epochs(onset=onset,
                                 subject=subject,
                                 data_type=data_type,
                                 hcp_path=hcp_path)

    hcp_epochs.resample(sfreq=256)  # save memory
    # lets use a convenience function to get our forward and source models
    src_outputs = hcp.anatomy.compute_forward_stack(
        subject=subject,
        subjects_dir=subjects_dir,
        hcp_path=hcp_path,
        recordings_path=recordings_path,
        # speed up computations here. Setting add_dist to True may improve the accuracy
        src_params=dict(add_dist=False),
        info_from=dict(data_type=data_type, run_index=run_index))
    fwd = src_outputs['fwd']
    del src_outputs  # free memory; only the forward model is needed
    #=================================================================
Beispiel #7
0
##############################################################################
# Now we can compute the same ERF based on the preprocessed epochs
#
# These are obtained from the 'tmegpreproc' pipeline.
# Things are pythonized and simplified however, so

evokeds_from_epochs_hcp = list()

for run_index, events in zip([0, 1], all_events):

    # HCP trigger samples are not always unique; keep only the first
    # occurrence of each time sample (diff == 0 marks a duplicate).
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    # BUGFIX: np.in1d needs a proper sequence; under Python 3 a bare dict
    # view becomes a 0-d object array and the mask is all-False. Wrap in
    # list(), matching the other examples in this file.
    subset = np.in1d(events[:, 2], list(event_id.values()))

    epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)

    # subset epochs, add events and id
    epochs_hcp = epochs_hcp[unique_subset][subset]
    epochs_hcp.events[:, 2] = events[subset, 2]
    epochs_hcp.event_id = event_id

    evoked = epochs_hcp['face'].average()

    del epochs_hcp  # free memory
    # These epochs have different channels.
    # We use a designated function to re-apply the channels and interpolate
    # them.

    evoked.baseline = baseline
    evoked.apply_baseline()
Beispiel #8
0
import hcp
import os
import numpy as np

# Local path where the HCP MEG data are stored.
hcp_path = "/media/robbis/DATA/meg/hcp/"

subject = '106521'
hcp_task = 'task_motor'
recordings_path = os.path.join(hcp_path, 'recordings')
fs_path = os.path.join(hcp_path, 'subjects')
# Create the FreeSurfer-style anatomy folders and head<->MRI transform
# needed by MNE from the HCP anatomy files.
hcp.make_mne_anatomy(subject,
                     subjects_dir=fs_path,
                     hcp_path=hcp_path,
                     recordings_path=recordings_path)

# Read response-locked epochs for the motor task.
epochs = hcp.read_epochs(subject, hcp_task, onset='resp', hcp_path=hcp_path)
info = hcp.read_info(subject=subject,
                     hcp_path=hcp_path,
                     data_type=hcp_task,
                     run_index=0)

# Reference FieldTrip (MATLAB) configuration this analysis mirrors:
#    cfg=[];
#    cfg.method  = 'mtmfft';
#    cfg.channel = 'MEG';
#    cfg.trials  = 1;
#    cfg.output  = 'powandcsd'; % gives power and cross-spectral density matrices
#    cfg.foi = 22;
#    cfg.tapsmofrq = 8; %this is 'in both directions' ie nhz up and nhz down
#    cfg.taper = 'dpss';
#    cfg.keeptapers = 'no';
##############################################################################
# Now we can compute the same ERF based on the preprocessed epochs
#
# These are obtained from the 'tmegpreproc' pipeline.
# Things are pythonized and simplified however, so

evokeds_from_epochs_hcp = list()

for run_index, events in zip([0, 1], all_events):

    # HCP trigger samples are not always unique; keep only the first
    # occurrence of each time sample (diff == 0 marks a duplicate).
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    # BUGFIX: np.in1d needs a proper sequence; under Python 3 a bare dict
    # view becomes a 0-d object array and the mask is all-False. Wrap in
    # list(), matching the other examples in this file.
    subset = np.in1d(events[:, 2], list(event_id.values()))

    epochs_hcp = hcp.read_epochs(run_index=run_index, **hcp_params)

    # subset epochs, add events and id
    epochs_hcp = epochs_hcp[unique_subset][subset]
    epochs_hcp.events[:, 2] = events[subset, 2]
    epochs_hcp.event_id = event_id

    evoked = epochs_hcp['face'].average()

    del epochs_hcp  # free memory
    # These epochs have different channels.
    # We use a designated function to re-apply the channels and interpolate
    # them.

    evoked.baseline = baseline
    evoked.apply_baseline()
Beispiel #10
0
epochs = list()
for run_index in [0, 1]:
    hcp_params['run_index'] = run_index
    trial_info = hcp.read_trial_info(**hcp_params)

    # Assemble an MNE-style (sample, 0, code) events array from the HCP
    # trial info table.
    events = np.c_[trial_info['stim']['codes'][:, 6] - 1,  # time sample
                   np.zeros(len(trial_info['stim']['codes'])),
                   trial_info['stim']['codes'][:, 3]  # event codes
                   ].astype(int)

    # for some reason in the HCP data the time events may not always be unique
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    events = events[unique_subset]  # use diff to find first unique events
    # BUGFIX: np.in1d needs a proper sequence; under Python 3 a bare dict
    # view becomes a 0-d object array and the mask is all-False. Wrap in
    # list(), matching the other examples in this file.
    subset = np.in1d(events[:, 2], list(event_id.values()))
    epochs_hcp = hcp.read_epochs(**hcp_params).decimate(decim)
    epochs_hcp = epochs_hcp[unique_subset][subset]
    epochs_hcp.events[:, 2] = events[subset, 2]
    epochs_hcp.event_id = event_id
    epochs_hcp.crop(-0.1, 0.5)
    # Re-apply missing channels and interpolate so runs can be concatenated.
    epochs.append(preproc.interpolate_missing(epochs_hcp, **hcp_params))

epochs = mne.concatenate_epochs(epochs)
del epochs_hcp  # free memory

##############################################################################
# Now we can proceed as shown in the MNE-Python decoding tutorials

# Binarize the two event codes into 0/1 labels for classification.
y = LabelBinarizer().fit_transform(epochs.events[:, 2]).ravel()

# NOTE(review): StratifiedKFold(y=...) is the pre-0.18 scikit-learn API;
# modern versions take n_splits and cv.split(X, y) -- confirm the sklearn
# version this script targets.
cv = StratifiedKFold(y=y)  # do a stratified cross-validation