for id_ in [event_ids[k] for k in conditions]
    ])
    epochs_train = mne.Epochs(raw,
                              events_,
                              event_ids,
                              tmin,
                              tmax,
                              picks=picks,
                              baseline=baseline,
                              preload=True,
                              reject=reject)
    epochs_train.equalize_event_counts(event_ids, copy=False)

    noise_covs = compute_covariance(
        epochs_train,
        method=method,
        tmin=None,
        tmax=0,  # baseline only
        return_estimators=True)  # returns list
    # prepare contrast
    evokeds = [epochs_train[k].average() for k in conditions]

    # compute stc based on worst and best
    for est, ax, kind, color in zip(noise_covs, (ax_stc_worst, ax_stc_best),
                                    ['best', 'worst'], best_colors):
        # We skip empirical rank estimation that we introduced in response to
        # the findings in reference [1] to use the naive code path that
        # triggered the behavior described in [1]. The expected true rank is
        # 274 for this dataset. Please do not do this with your data but
        # rely on the default rank estimator that helps regularizing the
        # covariance.
        inverse_operator = make_inverse_operator(epochs_train.info,
                                                 forward,
                                                 est,
                                                 loose=0.2,
                                                 depth=0.8,
                                                 rank=274)
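        # Apply the inverse to both conditions and take their difference to
        # form the contrast (a sketch; assumes apply_inverse and lambda2 are
        # defined earlier in the script, as in the fuller variant of this
        # loop below).
        stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, 'dSPM',
                                      pick_ori=None) for e in evokeds)
        stc = stc_a - stc_b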

Example 3
        epochs = mne.Epochs(raw,
                            eves,
                            cond,
                            tmin=-0.5,
                            proj=useProj,
                            tmax=2.0,
                            baseline=(-0.3, 0.0),
                            name=condstem,
                            reject=dict(grad=5000e-13, mag=5e-12))
        epo_name = (fpath + subj + '_' + para + '_' + condstem + ssstag +
                    '-epo.fif')
        epochs.save(epo_name)
        evokeds += [
            epochs.average(),
        ]

    avename = subj + '_' + para + ssstag + '-ave.fif'
    mne.write_evokeds(fpath + avename, evokeds)

    # Compute covariance
    epochs_all = mne.Epochs(raw,
                            eves,
                            condlist,
                            tmin=-0.3,
                            proj=useProj,
                            tmax=0.0,
                            baseline=(-0.3, 0.0),
                            name=condstem,
                            reject=dict(grad=5000e-13, mag=5e-12))
    cov = compute_covariance(epochs_all, tmin=-0.3, tmax=0.0)
    covname = subj + '_' + para + ssstag + '_collapse-cov.fif'
    cov.save(fpath + covname)
Example 4
        if saveEpochs:
            fname_epochs = fstem + '_' + condstem + '-epo.fif'
            epochs.save(respath + fname_epochs)

    # Now save overall onset N100
    epochs = mne.Epochs(raw,
                        eves,
                        event_id=condlists,
                        tmin=-0.4,
                        proj=True,
                        tmax=1.0,
                        baseline=(-0.2, 0.0),
                        reject=dict(grad=5000e-13, mag=5e-12))
    evokeds += [
        epochs.average(),
    ]
    if saveEpochs:
        fname_epochs = fstem + '_onset-epo.fif'
        epochs.save(respath + fname_epochs)

    if saveAve:
        avename = subj + ssstag + '_' + para + '_collapse-ave.fif'
        mne.write_evokeds(respath + avename, evokeds)

    if saveCov:
        # Compute covariance
        cov = compute_covariance(epochs, tmin=-0.2, tmax=0.0)
        covname = subj + ssstag + '_' + para + '_collapse-cov.fif'
        cov.save(respath + covname)
for n_train in samples_epochs:
    # estimate covs based on a subset of samples
    # make sure we have the same number of conditions.
    events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
                              for id_ in [event_ids[k] for k in conditions]])
    events_ = events_[np.argsort(events_[:, 0])]
    epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
                              baseline=baseline, preload=True, reject=reject,
                              decim=8)
    epochs_train.equalize_event_counts(event_ids)
    assert len(epochs_train) == 2 * n_train

    # We know some of these have too few samples, so suppress warning
    # with verbose='error'
    noise_covs = compute_covariance(
        epochs_train, method=method, tmin=None, tmax=0,  # baseline only
        return_estimators=True, verbose='error')  # returns list
    # prepare contrast
    evokeds = [epochs_train[k].average() for k in conditions]
    del epochs_train, events_
    # do contrast

    # We skip empirical rank estimation that we introduced in response to
    # the findings in reference [1] to use the naive code path that
    # triggered the behavior described in [1]. The expected true rank is
    # 274 for this dataset. Please do not do this with your data but
    # rely on the default rank estimator that helps regularizing the
    # covariance.
    stcs.append(list())
    methods_ordered.append(list())
    for cov in noise_covs:
Example 6
# let's look at rare events, button presses
event_id, tmin, tmax = 2, -0.2, 0.5
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, exclude='bads')
reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)

epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, reject=reject, preload=True)

# Uncomment next line to use fewer samples and study regularization effects
# epochs = epochs[:20]  # For your data, use as many samples as you can!

###############################################################################
# Compute covariance using automated regularization
noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
                                return_estimators=True, verbose=True, n_jobs=1,
                                projs=None)

# With "return_estimator=True" all estimated covariances sorted
# by log-likelihood are returned.

print('Covariance estimates sorted from best to worst')
for c in noise_covs:
    print("%s : %s" % (c['method'], c['loglik']))

###############################################################################
# Show whitening

evoked = epochs.average()

evoked.plot()  # plot evoked response
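# To actually inspect the whitening, plot the whitened evoked response with
# each covariance estimate; well-whitened baseline data should fluctuate
# around unit variance (a sketch using evoked.plot_white, which accepts a
# list of covariance objects).
evoked.plot_white(noise_covs)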
# %%
# Compute covariances
# -------------------
# ERS activity starts at 0.5 seconds after stimulus onset. Because these
# data have been processed by MaxFilter directly (rather than MNE-Python's
# version), we have to be careful to compute the rank with a more conservative
# threshold in order to get the correct data rank (64). Once this is used in
# combination with an advanced covariance estimator like "shrunk", the rank
# will be correctly preserved.

rank = mne.compute_rank(epochs, tol=1e-6, tol_kind='relative')
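# Quick sanity check of the estimated rank against the expected value of 64
# discussed above (illustrative print only).
print(rank)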
active_win = (0.5, 1.5)
baseline_win = (-1, 0)
baseline_cov = compute_covariance(epochs,
                                  tmin=baseline_win[0],
                                  tmax=baseline_win[1],
                                  method='shrunk',
                                  rank=rank,
                                  verbose=True)
active_cov = compute_covariance(epochs,
                                tmin=active_win[0],
                                tmax=active_win[1],
                                method='shrunk',
                                rank=rank,
                                verbose=True)

# Adding covariance objects performs a weighted average of the two estimates.
common_cov = baseline_cov + active_cov
mne.viz.plot_cov(baseline_cov, epochs.info)
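# A quick numerical check of the weighted averaging mentioned above (a
# sketch; assumes the combination is weighted by the 'nfree' field of each
# covariance object).
import numpy as np
w_b, w_a = baseline_cov['nfree'], active_cov['nfree']
expected = (w_b * baseline_cov['data'] + w_a * active_cov['data']) / (w_b + w_a)
print(np.allclose(common_cov['data'], expected))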

# %%
# Compute some source estimates
                         trans_fname=trans_fname,
                         subject=subject, subjects_dir=subjects_dir,
                         overwrite=True)

# Covariance (based on prestimulus window) ------------------------------------
from mne.cov import compute_covariance
if True:
    for meg_subject, subject in zip(range(1, 21), subjects_id):
        if subject in bad_mri:
            continue
        # Preproc
        epochs = load('epochs_decim', subject=meg_subject, preload=True)
        epochs.pick_types(meg=True, eeg=False, eog=False)
        epochs.apply_baseline((None, 0))
        # Compute covariance on same window as baseline
        cov = compute_covariance(epochs, tmin=epochs.times[0], tmax=0.,
                                 method='shrunk')
        save(cov, 'cov', subject=meg_subject, overwrite=True)

# Estimate inverse operator ---------------------------------------------------
from mne.minimum_norm import make_inverse_operator
if True:
    for meg_subject, subject in zip(range(1, 21), subjects_id):
        if subject in bad_mri:
            continue
        raw_fname = paths('sss', subject=meg_subject, block=1)
        inv_fname = paths('inv', subject=meg_subject)
        cov = load('cov', subject=meg_subject)
        fwd = load('fwd', subject=meg_subject)
        info = read_info(raw_fname)
        inv = make_inverse_operator(info, fwd, cov, loose=0.2, depth=0.8)
        save(inv, 'inv', subject=meg_subject, overwrite=True)
    im = imread(tmp_path + "_ven.png")
    os.remove(tmp_path + "_ven.png")
    return im


for n_train, (ax_stc_worst, ax_dynamics, ax_stc_best) in zip(samples_epochs, (axes1, axes2)):
    # estimate covs based on a subset of samples
    # make sure we have the same number of conditions.
    events_ = np.concatenate([events[events[:, 2] == id_][:n_train] for id_ in [event_ids[k] for k in conditions]])
    epochs_train = mne.Epochs(
        raw, events_, event_ids, tmin, tmax, picks=picks, baseline=baseline, preload=True, reject=reject
    )
    epochs_train.equalize_event_counts(event_ids, copy=False)

    noise_covs = compute_covariance(
        epochs_train, method=method, tmin=None, tmax=0, return_estimators=True  # baseline only
    )  # returns list
    # prepare contrast
    evokeds = [epochs_train[k].average() for k in conditions]

    # compute stc based on worst and best
    for est, ax, kind, color in zip(noise_covs, (ax_stc_worst, ax_stc_best), ["best", "worst"], best_colors):
        # We skip empirical rank estimation that we introduced in response to
        # the findings in reference [1] to use the naive code path that
        # triggered the behavior described in [1]. The expected true rank is
        # 274 for this dataset. Please do not do this with your data but
        # rely on the default rank estimator that helps regularizing the
        # covariance.
        inverse_operator = make_inverse_operator(epochs_train.info, forward, est, loose=0.2, depth=0.8, rank=274)
        stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM", pick_ori=None) for e in evokeds)
        stc = stc_a - stc_b
Example 11
for n_train in samples_epochs:
    # estimate covs based on a subset of samples
    # make sure we have the same number of conditions.
    idx = np.sort(
        np.concatenate([
            np.where(epochs.events[:, 2] == event_ids[cond])[0][:n_train]
            for cond in conditions
        ]))
    epochs_train = epochs[idx]
    epochs_train.equalize_event_counts(event_ids)
    assert len(epochs_train) == 2 * n_train

    # We know some of these have too few samples, so suppress warning
    # with verbose='error'
    noise_covs.append(
        compute_covariance(
            epochs_train,
            method=method,
            tmin=None,
            tmax=0,  # baseline only
            return_estimators=True,
            rank=None,
            verbose='error'))  # returns list
    # prepare contrast
    evokeds.append([epochs_train[k].average() for k in conditions])
    del epochs_train
del epochs

# Make forward
trans = op.join(data_path, 'MEG', 'spm',
                'SPM_CTF_MEG_example_faces1_3D_raw-trans.fif')
# oct5 and add_dist are just for speed, not recommended in general!
src = mne.setup_source_space('spm',
                             spacing='oct5',
                             subjects_dir=subjects_dir,