Example no. 1
def get_data():
    tmin, tmax = -1., 4.
    event_id = dict(hands=2, feet=3)
    subject = 1
    runs = [6, 10, 14]  # motor imagery: hands vs feet

    raw_fnames = eegbci.load_data(subject, runs)
    raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])

    # strip channel names of "." characters
    raw.rename_channels(lambda x: x.strip('.'))

    # Apply band-pass filter
    raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')

    events, _ = events_from_annotations(raw)

    picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                       exclude='bads')

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=None, preload=True)
    epochs.crop(tmin=1., tmax=None)
    labels = epochs.events[:, 2] - 2
    return epochs.get_data()[:, :, :256], labels
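The function above assumes several MNE-Python names are already imported. A minimal sketch of those imports and of a call to get_data(), assuming an MNE-Python version that provides events_from_annotations (0.17 or later); the shapes in the comment follow from the event_id mapping and the [:, :, :256] slice.

# Imports the snippet relies on (assumed; they are not shown in the excerpt).
from mne import Epochs, pick_types, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci

X, y = get_data()
# X: (n_epochs, n_eeg_channels, 256); y: 0 for hands, 1 for feet.
print(X.shape, y[:10])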
Example no. 2
def test_crop():
    """Test of crop of epochs
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=False,
                    reject=reject, flat=flat)
    assert_raises(RuntimeError, epochs.crop, None, 0.2)  # not preloaded
    data_normal = epochs.get_data()

    epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
                     picks=picks, baseline=(None, 0), preload=True,
                     reject=reject, flat=flat)
    with warnings.catch_warnings(record=True) as w:
        epochs2.crop(-20, 200)
    assert_true(len(w) == 2)

    # indices for slicing
    tmin_window = tmin + 0.1
    tmax_window = tmax - 0.1
    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
    assert_true(tmin_window > tmin)
    assert_true(tmax_window < tmax)
    epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
    data3 = epochs3.get_data()
    epochs2.crop(tmin_window, tmax_window)
    data2 = epochs2.get_data()
    assert_array_equal(data2, data_normal[:, :, tmask])
    assert_array_equal(data3, data_normal[:, :, tmask])
Example no. 3
def test_compute_covariance_auto_reg():
    """Test automated regularization"""

    raw = Raw(raw_fname, preload=False)
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    picks = pick_types(raw.info, meg='mag', eeg=False)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    picks=picks[:5], baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[30]),
                         pca=dict(iter_n_components=[30]))

    with warnings.catch_warnings(record=True) as w:
        covs = compute_covariance(epochs, method='auto',
                                  method_params=method_params,
                                  projs=True,
                                  return_estimators=True)
        warnings.simplefilter('always')
        assert_equal(len(w), 1)

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical',
               'factor_analysis',
               'ledoit_wolf',
               # 'pca',  XXX FAILS
               ]
    with warnings.catch_warnings(record=True) as w:
        cov3 = compute_covariance(epochs, method=methods,
                                  method_params=method_params, projs=False,
                                  return_estimators=True)
        warnings.simplefilter('always')
        assert_equal(len(w), 1)

    assert_equal(set([c['method'] for c in cov3]),
                 set(methods))

    # projs not allowed with FA or PCA
    assert_raises(ValueError, compute_covariance, epochs, method='pca',
                  projs=True)

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
Example no. 4
def test_compute_covariance_auto_reg():
    """Test automated regularization"""

    raw = Raw(raw_fname, preload=False)
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    picks = pick_types(raw.info, meg='mag', eeg=False)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    picks=picks[:5], baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[30]),
                         pca=dict(iter_n_components=[30]))

    with warnings.catch_warnings(record=True) as w:
        covs = compute_covariance(epochs, method='auto',
                                  method_params=method_params,
                                  projs=True,
                                  return_estimators=True)
        warnings.simplefilter('always')
        assert_equal(len(w), 1)

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    with warnings.catch_warnings(record=True) as w:
        cov3 = compute_covariance(epochs, method=['empirical',
                                                  'factor_analysis'],
                                  method_params=method_params, projs=False,
                                  return_estimators=True)
        warnings.simplefilter('always')
        assert_equal(len(w), 1)

    assert_equal(set([c['method'] for c in cov3]),
                 set(['empirical', 'factor_analysis']))

    # projs not allowed with FA or PCA
    assert_raises(ValueError, compute_covariance, epochs, method='pca',
                  projs=True)

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
Example no. 5
def test_compute_covariance_auto_reg():
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True,
                    add_eeg_ref=False)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs,
                              method='auto',
                              method_params=method_params,
                              projs=True,
                              return_estimators=True)

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'pca']
    cov3 = compute_covariance(epochs,
                              method=methods,
                              method_params=method_params,
                              projs=None,
                              return_estimators=True)

    assert_equal(set([c['method'] for c in cov3]), set(methods))

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError,
                  compute_covariance,
                  epochs,
                  method='shrunk',
                  scalings=dict(misc=123))
Example no. 6
def test_crop():
    """Test of crop of epochs
    """
    epochs = Epochs(raw,
                    events[:5],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=False,
                    reject=reject,
                    flat=flat)
    data_normal = epochs.get_data()

    epochs2 = Epochs(raw,
                     events[:5],
                     event_id,
                     tmin,
                     tmax,
                     picks=picks,
                     baseline=(None, 0),
                     preload=True,
                     reject=reject,
                     flat=flat)

    # indices for slicing
    tmin_window = tmin + 0.1
    tmax_window = tmax - 0.1
    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
    assert_true(tmin_window > tmin)
    assert_true(tmax_window < tmax)
    epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
    data3 = epochs3.get_data()
    epochs2.crop(tmin_window, tmax_window)
    data2 = epochs2.get_data()
    assert_array_equal(data2, data_normal[:, :, tmask])
    assert_array_equal(data3, data_normal[:, :, tmask])
Example no. 7
def test_crop():
    """Test of crop of epochs
    """
    epochs = Epochs(
        raw, events[:5], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=False, reject=reject, flat=flat
    )
    data_normal = epochs.get_data()

    epochs2 = Epochs(
        raw, events[:5], event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, reject=reject, flat=flat
    )

    # indices for slicing
    tmin_window = tmin + 0.1
    tmax_window = tmax - 0.1
    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
    assert_true(tmin_window > tmin)
    assert_true(tmax_window < tmax)
    epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
    data3 = epochs3.get_data()
    epochs2.crop(tmin_window, tmax_window)
    data2 = epochs2.get_data()
    assert_array_equal(data2, data_normal[:, :, tmask])
    assert_array_equal(data3, data_normal[:, :, tmask])
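The copy=True argument used by the test_crop variants above was removed from Epochs.crop in later MNE-Python releases; a sketch of the current equivalent of the epochs3 line, assuming a recent MNE where crop works in place and returns the instance:

    # same effect as epochs2.crop(tmin_window, tmax_window, copy=True)
    epochs3 = epochs2.copy().crop(tmin_window, tmax_window)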
Example no. 8
def test_compute_covariance_auto_reg():
    """Test automated regularization"""

    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              projs=True,
                              return_estimators=True)

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical',
               'factor_analysis',
               'ledoit_wolf',
               'pca']
    cov3 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=True)

    assert_equal(set([c['method'] for c in cov3]),
                 set(methods))

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
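With return_estimators=True, compute_covariance returns the fitted estimators sorted from best to worst log-likelihood, which is what the descending-order assertion above verifies; a short sketch of picking the winning estimator:

    best = covs[0]  # estimators come back best-first
    print(best['method'], best['loglik'])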
Example no. 9
def test_compute_covariance_auto_reg(rank):
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)
    epochs = epochs.crop(None, 0)[:5]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs,
                              method='auto',
                              method_params=method_params,
                              return_estimators=True,
                              rank=rank)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical' and rank == 'full'):

            assert not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask])

            # but the rest is the same
            assert_allclose(cov_a['data'][off_diag_mask],
                            cov_b['data'][off_diag_mask],
                            rtol=1e-12)

        else:
            # and here we have shrinkage everywhere.
            assert not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask])

            assert not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask])

    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order

    methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
    if rank == 'full':
        methods.extend(['factor_analysis', 'pca'])
    with catch_logging() as log:
        cov3 = compute_covariance(epochs,
                                  method=methods,
                                  method_params=method_params,
                                  projs=None,
                                  return_estimators=True,
                                  rank=rank,
                                  verbose=True)
    log = log.getvalue().split('\n')
    if rank is None:
        assert '    Setting small MAG eigenvalues to zero (without PCA)' in log
        assert 'Reducing data rank from 10 -> 7' in log
    else:
        assert 'Reducing' not in log
    method_names = [cov['method'] for cov in cov3]
    best_bounds = [-45, -35]
    bounds = [-55, -45] if rank == 'full' else best_bounds
    for method in set(methods) - {'empirical', 'shrunk'}:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert bounds[0] < this_lik < bounds[1]
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert best_bounds[0] < this_lik < best_bounds[1]
    this_lik = cov3[method_names.index('empirical')]['loglik']
    bounds = [-110, -100] if rank == 'full' else best_bounds
    assert bounds[0] < this_lik < bounds[1]

    assert_equal({c['method'] for c in cov3}, set(methods))

    cov4 = compute_covariance(epochs,
                              method=methods,
                              method_params=method_params,
                              projs=None,
                              return_estimators=False,
                              rank=rank)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  method='shrunk',
                  scalings=dict(misc=123))
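Examples 9 and 18 take a rank argument, so the test is presumably parametrized with pytest; a sketch of how such a decorator is typically written, using only the two values the test body itself branches on ('full' and None); the actual upstream parametrization is not shown here, so this is an assumption:

import pytest

# Hypothetical parametrization; the real values are not part of this excerpt.
@pytest.mark.parametrize('rank', ('full', None))
def test_compute_covariance_auto_reg(rank):
    ...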
Example no. 10
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))

# Apply band-pass filter
raw.filter(7., 30., method='iir')

events = find_events(raw, shortest_event=0, stim_channel='STI 014')

picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, add_eeg_ref=False)
epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

from sklearn.lda import LDA  # noqa
from sklearn.cross_validation import ShuffleSplit  # noqa

# Assemble a classifier
svc = LDA()
csp = CSP(n_components=4, reg=None, log=True)

# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
Example no. 11
# strip channel names
raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]

# Apply band-pass filter
raw.filter(7., 30., method='iir')

events = find_events(raw, shortest_event=0, stim_channel='STI 014')

picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, add_eeg_ref=False)
epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

from sklearn.lda import LDA  # noqa
from sklearn.cross_validation import ShuffleSplit  # noqa

# Assemble a classifier
svc = LDA()
csp = CSP(n_components=4, reg=None, log=True)

# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
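The two classification snippets above import from sklearn.lda and sklearn.cross_validation, modules that no longer exist in current scikit-learn; a sketch of the equivalent setup on a recent scikit-learn (the CSP import from mne.decoding is an assumption, since the excerpts do not show where CSP comes from):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit
from mne.decoding import CSP  # assumed source of CSP in the excerpts

lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True)

# ShuffleSplit no longer takes the number of samples as its first argument;
# it is inferred from the data passed to split().
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
scores = []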
Example no. 12
    # strip channel names of "." characters
    raw.rename_channels(lambda x: x.strip('.'))

    # Apply band-pass filter
    raw.filter(7., 30., method='iir')

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                       exclude='bads')

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=None, preload=True)
    epochs_train = epochs.crop(tmin=1., tmax=2.)
    labels = epochs.events[:, -1] - 2


    # Extract raw data and scale by 1000 due to scaling sensitivity in deep learning.
    # Resulting array shape: (45, 64, 161) = (trials, channels, samples)
    data = epochs.get_data() * 1000  # format is (trials, channels, samples)
    X.extend(data)
    y.extend(labels+1)
X = np.array(X)
y = np.array(y)
print(len(X))
kernels, chans, samples = 1, 64, 161

# take 50/25/25 percent of the data to train/validate/test
X_train, X_test, Y_train, Y_test = train_test_split(
    X, y, train_size=0.75, shuffle=True, random_state=1004)
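kernels, chans and samples are defined above but never used in this excerpt, and the comment promises a 50/25/25 split while the call only holds out 25 percent for testing; a hedged sketch of the likely continuation, splitting the training portion again and adding the trailing "kernels" axis that Keras EEGNet-style models expect (both steps are assumptions, not part of the original snippet):

# Split the 75% training portion 2/3 vs 1/3 to reach 50/25/25 overall (assumed).
X_train, X_validate, Y_train, Y_validate = train_test_split(
    X_train, Y_train, train_size=2 / 3, shuffle=True, random_state=1004)

# Reshape (trials, chans, samples) -> (trials, chans, samples, kernels) for an
# EEGNet-style Keras model (assumed downstream model).
X_train = X_train.reshape(X_train.shape[0], chans, samples, kernels)
X_validate = X_validate.reshape(X_validate.shape[0], chans, samples, kernels)
X_test = X_test.reshape(X_test.shape[0], chans, samples, kernels)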
Example no. 13
    inv_fname = meg_dir + subject + '_shepard-inv.fif'
    print("Reading raw with ICA applied...")
    raw = read_raw_fif(ica_raw_fname, preload=True)

    print("Finding events...")
    events = find_events(raw)  # returns an (n_events, 3) NumPy array

    print("Epoching data...")
    epochs = Epochs(raw,
                    events,
                    tmin=-0.2,
                    tmax=0.6,
                    decim=10,
                    baseline=(-0.2, 0.6),
                    preload=True)
    epochs = epochs.crop(tmin=-0.1, tmax=0.5)

    del raw

    rejfile = eelbrain.load.unpickle(ica_rej_fname)
    rejs = rejfile['accept'].x
    epochs = epochs[rejs]

    # load vars
    print("Loading trial info...")
    X_fname = meg_dir + '/stcs/%s_shepard_rej_trialinfo.csv' % (subject)
    events = pd.read_csv(X_fname)

    # GET RID OF SHEPARD
    epochs.metadata = events
    epochs = epochs[epochs.metadata['condition'] != 'shepard']
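Once a metadata DataFrame is attached, MNE also accepts pandas-style query strings for selecting epochs; the boolean-mask filtering above can equivalently be written as:

    epochs = epochs['condition != "shepard"']  # metadata query, same result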
Example no. 14
def extract_delays(raw_fname,
                   stim_chan='STI101',
                   misc_chan='MISC001',
                   trig_codes=None,
                   baseline=(-0.100, 0),
                   l_freq=None,
                   h_freq=None,
                   plot_figures=True,
                   crop_plot_time=None):
    """Estimate onset delay of analogue (misc) input relative to trigger

    Parameters
    ----------
    raw_fname : str
        Raw file name
    stim_chan : str
        Default stim channel is 'STI101'
    misc_chan : str
        Default misc channel is 'MISC001' (usually visual)
    trig_codes : int | list of int
        Trigger values to compare analogue signal to
    baseline : tuple of float
        Pre- and post-trigger time to calculate trigger limits from.
        Defaults to (-0.100, 0.)
    l_freq : float | None
        Low cut-off frequency in Hz. Uses mne.io.Raw.filter.
    h_freq : float | None
        High cut-off frequency in Hz. Uses mne.io.Raw.filter.
    plot_figures : bool
        Plot histogram and "ERP image" of delays (default: True)
    crop_plot_time : tuple, optional
        A 2-tuple with (tmin, tmax) being the limits to plot in the figure
    """
    raw = Raw(raw_fname, preload=True)
    if l_freq is not None or h_freq is not None:
        picks = pick_types(raw.info, misc=True)
        raw.filter(l_freq, h_freq, picks=picks)

    if trig_codes is not None:
        include_trigs = trig_codes  # do some checking here!
    events = pick_events(find_events(raw,
                                     stim_channel=stim_chan,
                                     min_duration=0.002),
                         include=include_trigs)
    delays = np.zeros(events.shape[0])
    pick = pick_channels(raw.info['ch_names'], include=[misc_chan])

    ana_data = np.sqrt(raw._data[pick, :].squeeze()**2)  # rectify!

    tmin, tmax = baseline
    offlevel, onlimit = _find_analogue_trigger_limit_sd(raw,
                                                        events,
                                                        pick,
                                                        tmin=tmin,
                                                        tmax=tmax)

    for row, unpack_me in enumerate(events):
        ind, before, after = unpack_me
        raw_ind = ind - raw.first_samp  # really indices into raw!
        anatrig_ind = _find_next_analogue_trigger(ana_data,
                                                  raw_ind,
                                                  offlevel,
                                                  onlimit,
                                                  maxdelay_samps=1000)
        delays[row] = anatrig_ind / raw.info['sfreq'] * 1.e3

    # epochs are needed for the amplitude stats below even when no figures
    # are plotted, so build them outside the plotting branch
    epochs = Epochs(raw, events, preload=True)
    if crop_plot_time is not None:
        epochs.crop(*crop_plot_time)

    if plot_figures:
        import matplotlib.pyplot as plt
        plt.ion()
        fig, axs = plt.subplots(1, 1)

        axs.hist(delays)
        axs.set_title('Delay histogram (ms)')

        imgfig, _ = plt.subplots(1, 2)
        epochs.plot_image(pick, fig=imgfig)
        # mnefig[0].get_axes()[1].set_title('')

    stats = dict()
    stats['mean'] = np.mean(delays)
    stats['std'] = np.std(delays)
    stats['median'] = np.median(delays)
    stats['q10'] = np.percentile(delays, 10.)
    stats['q90'] = np.percentile(delays, 90.)
    stats['max_amp'] = np.max(epochs._data[:, pick, :])  # ovr epochs & times
    stats['min_amp'] = np.min(epochs._data[:, pick, :])  # ovr epochs & times
    return (delays, stats)
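A hypothetical call illustrating the return values of extract_delays; the file name and trigger code are placeholders, not taken from the original:

delays, stats = extract_delays('visual_raw.fif', trig_codes=[1],
                               plot_figures=False)
print('median delay: %.1f ms (10-90%%: %.1f-%.1f ms)'
      % (stats['median'], stats['q10'], stats['q90']))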
Example no. 15
    #                        eeg=150e-6,  # 150 μV
    #                        eog=250e-6)

    choice_epochs = Epochs(raw,
                           new_evs,
                           choice_event_id,
                           on_missing='ignore',
                           tmin=-1,
                           tmax=1,
                           baseline=(-.550, -.300),
                           reject_by_annotation=True,
                           preload=True,
                           picks=['FCz', 'Cz'])

    choice_epochs = choice_epochs.resample(sfreq=100, npad='auto')
    choice_epochs = choice_epochs.crop(tmin=.0, tmax=0.1)

    index, scaling_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)

    df = choice_epochs.to_data_frame(picks=None,
                                     scalings=scalings,
                                     scaling_time=scaling_time,
                                     index=index)

    df = df.reset_index()

    factors = ['condition', 'epoch']
    df = df.assign(subject=subj)
    df = pd.DataFrame(df)

    df.to_csv(os.path.join(output_dir, 'sub-%s.tsv' % subj),
Example no. 16
def test_compute_covariance_auto_reg():
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical'):

            assert_true(not np.any(
                        cov_a['data'][diag_mask] ==
                        cov_b['data'][diag_mask]))

            # but the rest is the same
            assert_array_equal(
                cov_a['data'][off_diag_mask],
                cov_b['data'][off_diag_mask])

        else:
            # and here we have shrinkage everywhere.
            assert_true(not np.any(
                        cov_a['data'][diag_mask] ==
                        cov_b['data'][diag_mask]))

            assert_true(not np.any(
                        cov_a['data'][diag_mask] ==
                        cov_b['data'][diag_mask]))

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'oas', 'pca',
               'shrunk', 'shrinkage']
    cov3 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=True)
    method_names = [cov['method'] for cov in cov3]
    for method in ['factor_analysis', 'ledoit_wolf', 'oas', 'pca',
                   'shrinkage']:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert -55 < this_lik < -45
    this_lik = cov3[method_names.index('empirical')]['loglik']
    assert -110 < this_lik < -100
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert -45 < this_lik < -35

    assert_equal(set([c['method'] for c in cov3]), set(methods))

    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
Example no. 17
def test_compute_covariance_auto_reg():
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs,
                              method='auto',
                              method_params=method_params,
                              return_estimators=True)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical'):

            assert_true(not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask]))

            # but the rest is the same
            assert_array_equal(cov_a['data'][off_diag_mask],
                               cov_b['data'][off_diag_mask])

        else:
            # and here we have shrinkage everywhere.
            assert_true(not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask]))

            assert_true(not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask]))

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'pca']
    cov3 = compute_covariance(epochs,
                              method=methods,
                              method_params=method_params,
                              projs=None,
                              return_estimators=True)

    assert_equal(set([c['method'] for c in cov3]), set(methods))

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError,
                  compute_covariance,
                  epochs,
                  method='shrunk',
                  scalings=dict(misc=123))
Example no. 18
def test_compute_covariance_auto_reg(rank):
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:5]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True, rank=rank)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical' and rank == 'full'):

            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

            # but the rest is the same
            assert_array_equal(cov_a['data'][off_diag_mask],
                               cov_b['data'][off_diag_mask])

        else:
            # and here we have shrinkage everywhere.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order

    methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
    if rank == 'full':
        methods.extend(['factor_analysis', 'pca'])
    with catch_logging() as log:
        cov3 = compute_covariance(epochs, method=methods,
                                  method_params=method_params, projs=None,
                                  return_estimators=True, rank=rank,
                                  verbose=True)
    log = log.getvalue().split('\n')
    if rank is None:
        assert '    Setting small MAG eigenvalues to zero (without PCA)' in log
        assert 'Reducing data rank from 10 -> 7' in log
    else:
        assert 'Reducing' not in log
    method_names = [cov['method'] for cov in cov3]
    best_bounds = [-45, -35]
    bounds = [-55, -45] if rank == 'full' else best_bounds
    for method in set(methods) - {'empirical', 'shrunk'}:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert bounds[0] < this_lik < bounds[1]
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert best_bounds[0] < this_lik < best_bounds[1]
    this_lik = cov3[method_names.index('empirical')]['loglik']
    bounds = [-110, -100] if rank == 'full' else best_bounds
    assert bounds[0] < this_lik < bounds[1]

    assert_equal({c['method'] for c in cov3}, set(methods))

    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False, rank=rank)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))