Example #1
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname, add_eeg_ref=False).crop(0, 5).load_data()
    events = find_events(raw, stim_channel="STI 014")
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0.0, preload=True, add_eeg_ref=False)
    for kind in ("shift", "None"):
        epochs_2 = epochs.copy()
        # This should be fine
        with warnings.catch_warnings(record=True) as w:
            compute_covariance([epochs, epochs_2])
            assert_equal(len(w), 0)
            if kind == "shift":
                epochs_2.info["dev_head_t"]["trans"][:3, 3] += 0.001
            else:  # None
                epochs_2.info["dev_head_t"] = None
            assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch="ignore")
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch="warn")
            assert_raises(ValueError, compute_covariance, epochs, on_mismatch="x")
        assert_true(any("transform mismatch" in str(ww.message) for ww in w))
    # This should work
    epochs.info["dev_head_t"] = None
    epochs_2.info["dev_head_t"] = None
    compute_covariance([epochs, epochs_2], method=None)
Example #2
def test_pick_seeg_ecog():
    """Test picking with sEEG and ECoG
    """
    names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split()
    types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split()
    info = create_info(names, 1024., types)
    idx = channel_indices_by_type(info)
    assert_array_equal(idx['mag'], [0, 1])
    assert_array_equal(idx['eeg'], [2, 3])
    assert_array_equal(idx['seeg'], [4, 5, 7])
    assert_array_equal(idx['ecog'], [6, 8, 9])
    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7])
    for i, t in enumerate(types):
        assert_equal(channel_type(info, i), types[i])
    raw = RawArray(np.zeros((len(names), 10)), info)
    events = np.array([[1, 0, 0], [2, 0, 0]])
    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5, add_eeg_ref=False)
    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
    e_seeg = evoked.copy().pick_types(meg=False, seeg=True)
    for l, r in zip(e_seeg.ch_names, [names[4], names[5], names[7]]):
        assert_equal(l, r)
    # Deal with constant debacle
    raw = read_raw_fif(op.join(io_dir, 'tests', 'data',
                               'test_chpi_raw_sss.fif'), add_eeg_ref=False)
    assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0)
Example #3
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        compute_covariance([epochs, epochs_2])
        if kind == 'shift':
            epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
        else:  # None
            epochs_2.info['dev_head_t'] = None
        pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
        compute_covariance([epochs, epochs_2], on_mismatch='ignore')
        with pytest.raises(RuntimeWarning, match='transform mismatch'):
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
        pytest.raises(ValueError, compute_covariance, epochs,
                      on_mismatch='x')
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
Example #4
def test_unsupervised_spatial_filter():
    """Test unsupervised spatial filter."""
    from sklearn.decomposition import PCA
    from sklearn.kernel_ridge import KernelRidge
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)

    # Test estimator
    assert_raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))

    # Test fit
    X = epochs.get_data()
    n_components = 4
    usf = UnsupervisedSpatialFilter(PCA(n_components))
    usf.fit(X)
    usf1 = UnsupervisedSpatialFilter(PCA(n_components))

    # test transform
    assert_equal(usf.transform(X).ndim, 3)
    # test fit_transform
    assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
    # assert shape
    assert_equal(usf.transform(X).shape[1], n_components)

    # Test with average param
    usf = UnsupervisedSpatialFilter(PCA(4), average=True)
    usf.fit_transform(X)
    assert_raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
Example #5
def test_mockclient():
    """Test the RtMockClient."""

    raw = mne.io.read_raw_fif(raw_fname, preload=True, verbose=False,
                              add_eeg_ref=False)
    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                           stim=True, exclude=raw.info['bads'])

    event_id, tmin, tmax = 1, -0.2, 0.5

    epochs = Epochs(raw, events[:7], event_id=event_id, tmin=tmin, tmax=tmax,
                    picks=picks, baseline=(None, 0), preload=True,
                    add_eeg_ref=False)
    data = epochs.get_data()

    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
                         isi_max=0.5, add_eeg_ref=False)

    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)

    rt_data = rt_epochs.get_data()

    assert_true(rt_data.shape == data.shape)
    assert_array_equal(rt_data, data)
Example #6
def test_scaler():
    """Test methods of Scaler."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]

    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    scaler = Scaler(epochs.info)
    y = epochs.events[:, -1]

    # np invalid divide value warnings
    with warnings.catch_warnings(record=True):
        X = scaler.fit_transform(epochs_data, y)
        assert_true(X.shape == epochs_data.shape)
        X2 = scaler.fit(epochs_data, y).transform(epochs_data)

    assert_array_equal(X2, X)

    # Test inverse_transform
    with warnings.catch_warnings(record=True):  # invalid value in mult
        Xi = scaler.inverse_transform(X, y)
    assert_array_almost_equal(epochs_data, Xi)

    for kwargs in [{'with_mean': False}, {'with_std': False}]:
        scaler = Scaler(epochs.info, **kwargs)
        scaler.fit(epochs_data, y)
        assert_array_almost_equal(
            X, scaler.inverse_transform(scaler.transform(X)))
    # Test init exception
    assert_raises(ValueError, scaler.fit, epochs, y)
    assert_raises(ValueError, scaler.transform, epochs, y)
Example #7
def test_ica_ctf():
    """Test run ICA computation on ctf data with/without compensation."""
    method = 'fastica'
    raw = read_raw_ctf(ctf_fname, preload=True)
    events = make_fixed_length_events(raw, 99999)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        evoked = epochs.average()

        # test fit
        for inst in [raw, epochs]:
            ica = ICA(n_components=2, random_state=0, max_iter=2,
                      method=method)
            with pytest.warns(UserWarning, match='did not converge'):
                ica.fit(inst)

        # test apply and get_sources
        for inst in [raw, epochs, evoked]:
            ica.apply(inst)
            ica.get_sources(inst)

    # test mixed compensation case
    raw.apply_gradient_compensation(0)
    ica = ICA(n_components=2, random_state=0, max_iter=2, method=method)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    raw.apply_gradient_compensation(1)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    evoked = epochs.average()
    for inst in [raw, epochs, evoked]:
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.apply(inst)
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.get_sources(inst)
Example #8
def test_xdawn_apply_transform():
    """Test Xdawn apply and transform."""
    # get data
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)
    n_components = 2
    # Fit Xdawn
    xd = Xdawn(n_components=n_components, correct_overlap='auto')
    xd.fit(epochs)

    # apply on raw
    xd.apply(raw)
    # apply on epochs
    denoise = xd.apply(epochs)
    # apply on evoked
    xd.apply(epochs.average())
    # apply on other thing should raise an error
    assert_raises(ValueError, xd.apply, 42)

    # transform on epochs
    xd.transform(epochs)
    # transform on ndarray
    xd.transform(epochs._data)
    # transform on something else
    assert_raises(ValueError, xd.transform, 42)

    # check numerical results with shuffled epochs
    idx = np.arange(len(epochs))
    np.random.shuffle(idx)
    xd.fit(epochs[idx])
    denoise_shfl = xd.apply(epochs)
    assert_array_equal(denoise['cond2']._data, denoise_shfl['cond2']._data)
Example #9
def test_stockwell_api():
    """Test stockwell functions."""
    raw = read_raw_fif(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    event_name = op.join(base_dir, 'test-eve.fif')
    events = read_events(event_name)
    epochs = Epochs(raw, events,  # XXX pick 2 has epochs of zeros.
                    event_id, tmin, tmax, picks=[0, 1, 3])
    for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
        with warnings.catch_warnings(record=True):  # zero padding
            power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
                                       return_itc=True)
        if fmax is not None:
            assert_true(power.freqs.max() <= fmax)
        with warnings.catch_warnings(record=True):  # padding
            power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,
                                         fmax=fmax, return_itc=False)
        # for multitaper these don't necessarily match, but they seem to
        # for stockwell... if this fails, this maybe could be changed
        # just to check the shape
        assert_array_almost_equal(power_evoked.data, power.data)
    assert_true(isinstance(power, AverageTFR))
    assert_true(isinstance(itc, AverageTFR))
    assert_equal(power.data.shape, itc.data.shape)
    assert_true(itc.data.min() >= 0.0)
    assert_true(itc.data.max() <= 1.0)
    assert_true(np.log(power.data.max()) * 20 <= 0.0)
Example #10
def test_drop_epochs():
    """Test dropping of epochs.
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    events1 = events[events[:, 2] == event_id]

    # Bound checks
    assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
    assert_raises(IndexError, epochs.drop_epochs, [-1])
    assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])

    # Test selection attribute
    assert_array_equal(epochs.selection,
                       np.where(events[:, 2] == event_id)[0])
    assert_equal(len(epochs.drop_log), len(events))
    assert_true(all(epochs.drop_log[k] == ['IGNORED']
                for k in set(range(len(events))) - set(epochs.selection)))

    selection = epochs.selection.copy()
    n_events = len(epochs.events)
    epochs.drop_epochs([2, 4], reason='d')
    assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
    assert_equal(len(epochs.drop_log), len(events))
    assert_equal([epochs.drop_log[k]
                  for k in selection[[2, 4]]], [['d'], ['d']])
    assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
    assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
    assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
Example #11
def test_epochs_to_nitime():
    """Test test_to_nitime
    """
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True,
                    reject=reject, flat=flat)

    picks2 = [0, 3]

    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
                                 collapse=True, copy=True)
    assert_true(epochs_ts.ch_names == epochs.ch_names)

    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
                                 collapse=True, copy=True)
    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])

    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
                                 collapse=False, copy=False)
    assert_true(epochs_ts.ch_names == epochs.ch_names)

    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
                                 collapse=False, copy=False)
    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])
Example #12
def test_psdestimator():
    """Test methods of PSDEstimator
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(
        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(
        raw,
        events,
        event_id,
        tmin,
        tmax,
        picks=picks,
        baseline=(None, 0),
        preload=True)
    epochs_data = epochs.get_data()
    psd = PSDEstimator(2 * np.pi, 0, np.inf)
    y = epochs.events[:, -1]
    X = psd.fit_transform(epochs_data, y)

    assert_true(X.shape[0] == epochs_data.shape[0])
    assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)

    # Test init exception
    assert_raises(ValueError, psd.fit, epochs, y)
    assert_raises(ValueError, psd.transform, epochs, y)
Example #13
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        with warnings.catch_warnings(record=True) as w:
            compute_covariance([epochs, epochs_2])
            assert_equal(len(w), 0)
            if kind == 'shift':
                epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
            else:  # None
                epochs_2.info['dev_head_t'] = None
            assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='ignore')
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
            assert_raises(ValueError, compute_covariance, epochs,
                          on_mismatch='x')
        assert_true(any('transform mismatch' in str(ww.message) for ww in w))
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
Example #14
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs
    """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    csp = CSP(n_components=n_components)

    csp.fit(epochs_data, epochs.events[:, -1])
    y = epochs.events[:, -1]
    X = csp.fit_transform(epochs_data, y)
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
                              X)

    # test init exception
    assert_raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, y)
    assert_raises(ValueError, csp.transform, epochs, y)

    csp.n_components = n_components
    sources = csp.transform(epochs_data)
    assert_true(sources.shape[1] == n_components)
Example #15
def test_xdawn_apply_transform():
    """Test Xdawn apply and transform."""
    # Get data
    raw, events, picks = _get_data()
    raw.pick_types(eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                    preload=True, baseline=None,
                    verbose=False)
    n_components = 2
    # Fit Xdawn
    xd = Xdawn(n_components=n_components, correct_overlap=False)
    xd.fit(epochs)

    # Apply on different types of instances
    for inst in [raw, epochs.average(), epochs]:
        denoise = xd.apply(inst)
    # Apply on other thing should raise an error
    assert_raises(ValueError, xd.apply, 42)

    # Transform on epochs
    xd.transform(epochs)
    # Transform on ndarray
    xd.transform(epochs._data)
    # Transform on something else
    assert_raises(ValueError, xd.transform, 42)

    # Check numerical results with shuffled epochs
    np.random.seed(0)  # random makes unstable linalg
    idx = np.arange(len(epochs))
    np.random.shuffle(idx)
    xd.fit(epochs[idx])
    denoise_shfl = xd.apply(epochs)
    assert_array_almost_equal(denoise['cond2']._data,
                              denoise_shfl['cond2']._data)
Example #16
def test_concatenatechannels():
    """Test methods of ConcatenateChannels
    """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')
    picks = picks[1:13:3]
    with warnings.catch_warnings(record=True) as w:
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    concat = ConcatenateChannels(epochs.info)
    y = epochs.events[:, -1]
    X = concat.fit_transform(epochs_data, y)

    # Check data dimensions
    assert_true(X.shape[0] == epochs_data.shape[0])
    assert_true(X.shape[1] == epochs_data.shape[1] * epochs_data.shape[2])

    assert_array_equal(concat.fit(epochs_data, y).transform(epochs_data), X)

    # Check if data is preserved
    n_times = epochs_data.shape[2]
    assert_array_equal(epochs_data[0, 0, 0:n_times], X[0, 0:n_times])

    # Test init exception
    assert_raises(ValueError, concat.fit, epochs, y)
    assert_raises(ValueError, concat.transform, epochs, y)
Example #17
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg, norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        X = csp.fit_transform(epochs_data, y)
        assert_true(csp.filters_.shape == (n_channels, n_channels))
        assert_true(csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)

        # test init exception
        assert_raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        assert_raises(ValueError, csp.fit, epochs, y)
        assert_raises(ValueError, csp.transform, epochs)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        assert_true(sources.shape[1] == n_components)
Example #18
def test_scaler():
    """Test methods of Scaler
    """
    raw = fiff.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')
    picks = picks[1:13:3]

    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    scaler = Scaler(epochs.info)
    y = epochs.events[:, -1]

    # np invalid divide value warnings
    with warnings.catch_warnings(record=True):
        X = scaler.fit_transform(epochs_data, y)
        assert_true(X.shape == epochs_data.shape)
        X2 = scaler.fit(epochs_data, y).transform(epochs_data)

    assert_array_equal(X2, X)

    # Test init exception
    assert_raises(ValueError, scaler.fit, epochs, y)
    assert_raises(ValueError, scaler.transform, epochs, y)
Example #19
def test_xdawn_apply_transform():
    """Test Xdawn apply and transform."""
    # get data
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)
    n_components = 2
    # Fit Xdawn
    xd = Xdawn(n_components=n_components, correct_overlap='auto')
    xd.fit(epochs)

    # apply on raw
    xd.apply(raw)
    # apply on epochs
    xd.apply(epochs)
    # apply on evoked
    xd.apply(epochs.average())
    # apply on other thing should raise an error
    assert_raises(ValueError, xd.apply, 42)

    # transform on epochs
    xd.transform(epochs)
    # transform on ndarray
    xd.transform(epochs._data)
    # transform on something else
    assert_raises(ValueError, xd.transform, 42)
Example #20
def test_evoked_standard_error():
    """Test calculation and read/write of standard error
    """
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    evoked = [epochs.average(), epochs.standard_error()]
    io.write_evokeds(op.join(tempdir, 'evoked.fif'), evoked)
    evoked2 = read_evokeds(op.join(tempdir, 'evoked.fif'), [0, 1])
    evoked3 = [read_evokeds(op.join(tempdir, 'evoked.fif'), 'Unknown'),
               read_evokeds(op.join(tempdir, 'evoked.fif'), 'Unknown',
                            kind='standard_error')]
    for evoked_new in [evoked2, evoked3]:
        assert_true(evoked_new[0]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_AVERAGE)
        assert_true(evoked_new[0].kind == 'average')
        assert_true(evoked_new[1]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_STD_ERR)
        assert_true(evoked_new[1].kind == 'standard_error')
        for ave, ave2 in zip(evoked, evoked_new):
            assert_array_almost_equal(ave.data, ave2.data)
            assert_array_almost_equal(ave.times, ave2.times)
            assert_equal(ave.nave, ave2.nave)
            assert_equal(ave._aspect_kind, ave2._aspect_kind)
            assert_equal(ave.kind, ave2.kind)
            assert_equal(ave.last, ave2.last)
            assert_equal(ave.first, ave2.first)
Example #21
def test_acqparser_averaging():
    """Test averaging with AcqParserFIF vs. Elekta software."""
    raw = read_raw_fif(fname_raw_elekta, preload=True)
    acqp = AcqParserFIF(raw.info)
    for cat in acqp.categories:
        # XXX datasets match only when baseline is applied to both,
        # not sure where relative dc shift comes from
        cond = acqp.get_condition(raw, cat)
        eps = Epochs(raw, baseline=(-.05, 0), **cond)
        ev = eps.average()
        ev_ref = read_evokeds(fname_ave_elekta, cat['comment'],
                              baseline=(-.05, 0), proj=False)
        ev_mag = ev.copy()
        ev_mag.pick_channels(['MEG0111'])
        ev_grad = ev.copy()
        ev_grad.pick_channels(['MEG2643', 'MEG1622'])
        ev_ref_mag = ev_ref.copy()
        ev_ref_mag.pick_channels(['MEG0111'])
        ev_ref_grad = ev_ref.copy()
        ev_ref_grad.pick_channels(['MEG2643', 'MEG1622'])
        assert_allclose(ev_mag.data, ev_ref_mag.data,
                        rtol=0, atol=1e-15)  # tol = 1 fT
        # Elekta put these in a different order
        assert ev_grad.ch_names[::-1] == ev_ref_grad.ch_names
        assert_allclose(ev_grad.data[::-1], ev_ref_grad.data,
                        rtol=0, atol=1e-13)  # tol = 1 fT/cm
Example #22
def test_compute_proj_epochs():
    """Test SSP computation on epochs"""
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = Raw(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                       exclude=[])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'proj.fif.gz'), projs)
    for p_fname in [proj_fname, proj_gz_fname,
                    op.join(tempdir, 'proj.fif.gz')]:
        projs2 = read_proj(p_fname)

        assert_true(len(projs) == len(projs2))

        for p1, p2 in zip(projs, projs2):
            assert_true(p1['desc'] == p2['desc'])
            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
            assert_true(p1['active'] == p2['active'])
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                mask = np.ones(p1_data.size, dtype=bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert_true(len(projs_evoked) == 2)
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2)
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
Example #23
def test_ems():
    """Test event-matched spatial filters"""
    raw = io.read_raw_fif(raw_fname, preload=False)

    # create unequal number of events
    events = read_events(event_name)
    events[-2, 2] = 3
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]

    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]

    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])

    assert_raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])
    raw.close()
Example #24
def test_filterestimator():
    """Test methods of FilterEstimator
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads")
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()

    # Add tests for different combinations of l_freq and h_freq
    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=40)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)
        assert_true(X.shape == epochs_data.shape)
        assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)

    filt = FilterEstimator(epochs.info, l_freq=0, h_freq=40)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)

    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
    y = epochs.events[:, -1]
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        assert_raises(ValueError, filt.fit_transform, epochs_data, y)

    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=None)
    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
        X = filt.fit_transform(epochs_data, y)

    # Test init exception
    assert_raises(ValueError, filt.fit, epochs, y)
    assert_raises(ValueError, filt.transform, epochs, y)
Example #25
def test_xdawn_regularization():
    """Test Xdawn with regularization."""
    # Get data
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)

    # Test with overlapping events.
    # modify events to simulate one overlap
    events = epochs.events
    sel = np.where(events[:, 2] == 2)[0][:2]
    modified_event = events[sel[0]]
    modified_event[0] += 1
    epochs.events[sel[1]] = modified_event
    # Fit and check that overlap was found and applied
    xd = Xdawn(n_components=2, correct_overlap='auto', reg='oas')
    xd.fit(epochs)
    assert_equal(xd.correct_overlap_, True)
    evoked = epochs['cond2'].average()
    assert_true(np.sum(np.abs(evoked.data - xd.evokeds_['cond2'].data)))

    # With covariance regularization
    for reg in [.1, 0.1, 'ledoit_wolf', 'oas']:
        xd = Xdawn(n_components=2, correct_overlap=False,
                   signal_cov=np.eye(len(picks)), reg=reg)
        xd.fit(epochs)
    # With bad shrinkage
    xd = Xdawn(n_components=2, correct_overlap=False,
               signal_cov=np.eye(len(picks)), reg=2)
    assert_raises(ValueError, xd.fit, epochs)
Example #26
def test_epochs_vectorizer():
    """Test methods of EpochsVectorizer
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads")
    picks = picks[1:13:3]
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    vector = EpochsVectorizer(epochs.info)
    y = epochs.events[:, -1]
    X = vector.fit_transform(epochs_data, y)

    # Check data dimensions
    assert_true(X.shape[0] == epochs_data.shape[0])
    assert_true(X.shape[1] == epochs_data.shape[1] * epochs_data.shape[2])

    assert_array_equal(vector.fit(epochs_data, y).transform(epochs_data), X)

    # Check if data is preserved
    n_times = epochs_data.shape[2]
    assert_array_equal(epochs_data[0, 0, 0:n_times], X[0, 0:n_times])

    # Check inverse transform
    Xi = vector.inverse_transform(X, y)
    assert_true(Xi.shape[0] == epochs_data.shape[0])
    assert_true(Xi.shape[1] == epochs_data.shape[1])
    assert_array_equal(epochs_data[0, 0, 0:n_times], Xi[0, 0, 0:n_times])

    # Test init exception
    assert_raises(ValueError, vector.fit, epochs, y)
    assert_raises(ValueError, vector.transform, epochs, y)
Example #27
def test_epochs_vector_inverse():
    """Test vector inverse consistency between evoked and epochs."""
    raw = read_raw_fif(fname_raw)
    events = find_events(raw, stim_channel='STI 014')[:2]
    reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6)

    epochs = Epochs(raw, events, None, 0, 0.01, baseline=None,
                    reject=reject, preload=True)

    assert_equal(len(epochs), 2)

    evoked = epochs.average(picks=range(len(epochs.ch_names)))

    inv = read_inverse_operator(fname_inv)

    method = "MNE"
    snr = 3.
    lambda2 = 1. / snr ** 2

    stcs_epo = apply_inverse_epochs(epochs, inv, lambda2, method=method,
                                    pick_ori='vector', return_generator=False)
    stc_epo = np.mean(stcs_epo)

    stc_evo = apply_inverse(evoked, inv, lambda2, method=method,
                            pick_ori='vector')

    assert_allclose(stc_epo.data, stc_evo.data, rtol=1e-9, atol=0)
Example #28
def test_info():
    """Test info object"""
    raw = io.Raw(raw_fname)
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = read_events(event_name)
    event_id = int(events[0, 2])
    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
                    baseline=(None, 0))

    evoked = epochs.average()

    events = read_events(event_name)

    # Test subclassing was successful.
    info = Info(a=7, b='aaaaa')
    assert_true('a' in info)
    assert_true('b' in info)
    info[42] = 'foo'
    assert_true(info[42] == 'foo')

    # test info attribute in API objects
    for obj in [raw, epochs, evoked]:
        assert_true(isinstance(obj.info, Info))
        info_str = '%s' % obj.info
        assert_equal(len(info_str.split('\n')), (len(obj.info.keys()) + 2))
        assert_true(all(k in info_str for k in obj.info.keys()))
Example #29
def test_events_long():
    """Test events."""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    raw = read_raw_fif(raw_fname, preload=True)
    raw_tmin, raw_tmax = 0, 90

    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, vis_l=3)

    # select gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=True, exclude=raw.info['bads'])

    # load data with usual Epochs for later verification
    raw = concatenate_raws([raw, raw.copy(), raw.copy(), raw.copy(),
                            raw.copy(), raw.copy()])
    assert 110 < raw.times[-1] < 130
    raw_cropped = raw.copy().crop(raw_tmin, raw_tmax)
    events_offline = find_events(raw_cropped)
    epochs_offline = Epochs(raw_cropped, events_offline, event_id=event_id,
                            tmin=tmin, tmax=tmax, picks=picks, decim=1,
                            reject=dict(grad=4000e-13, eog=150e-6),
                            baseline=None)
    epochs_offline.drop_bad()

    # create the mock-client object
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks, decim=1,
                         reject=dict(grad=4000e-13, eog=150e-6), baseline=None,
                         isi_max=1.)

    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=raw_tmin, tmax=raw_tmax,
                        buffer_size=1000)

    expected_events = epochs_offline.events.copy()
    expected_events[:, 0] = expected_events[:, 0] - raw_cropped.first_samp
    assert np.all(expected_events[:, 0] <=
                  (raw_tmax - tmax) * raw.info['sfreq'])
    assert_array_equal(rt_epochs.events, expected_events)
    assert len(rt_epochs) == len(epochs_offline)

    data_picks = pick_types(epochs_offline.info, meg='grad', eeg=False,
                            eog=True,
                            stim=False, exclude=raw.info['bads'])

    for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
        if ev_num == 0:
            X_rt = ev.data[None, data_picks, :]
            y_rt = int(ev.comment)  # comment attribute contains the event_id
        else:
            X_rt = np.concatenate((X_rt, ev.data[None, data_picks, :]), axis=0)
            y_rt = np.append(y_rt, int(ev.comment))

    X_offline = epochs_offline.get_data()[:, data_picks, :]
    y_offline = epochs_offline.events[:, 2]
    assert_array_equal(X_rt, X_offline)
    assert_array_equal(y_rt, y_offline)
Example #30
def test_access_by_name():
    """Test accessing epochs by event name and on_missing for rare events
    """
    assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
                  tmax, picks=picks)
    assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
                  tmin, tmax, picks=picks)
    assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
                  picks=picks)
    # Test accessing non-existent events (assumes 12345678 does not exist)
    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
    assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
                  tmin, tmax)
    # Test on_missing
    assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
                  on_missing='foo')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
        nw = len(w)
        assert_true(1 <= nw <= 2)
        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
        assert_equal(len(w), nw)
    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
    assert_raises(KeyError, epochs.__getitem__, 'bar')

    data = epochs['a'].get_data()
    event_a = events[events[:, 2] == 1]
    assert_true(len(data) == len(event_a))

    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
                    preload=True)
    assert_raises(KeyError, epochs.__getitem__, 'bar')
    epochs.save(op.join(tempdir, 'test-epo.fif'))
    epochs2 = read_epochs(op.join(tempdir, 'test-epo.fif'))

    for ep in [epochs, epochs2]:
        data = ep['a'].get_data()
        event_a = events[events[:, 2] == 1]
        assert_true(len(data) == len(event_a))

    assert_array_equal(epochs2['a'].events, epochs['a'].events)

    epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                     tmin, tmax, picks=picks, preload=True)
    assert_equal(list(sorted(epochs3[['a', 'b']].event_id.values())),
                 [1, 2])
    epochs4 = epochs['a']
    epochs5 = epochs3['a']
    assert_array_equal(epochs4.events, epochs5.events)
    # 20 is our tolerance because epochs are written out as floats
    assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
    epochs6 = epochs3[['a', 'b']]
    assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
                                  epochs6.events[:, 2] == 2)))
    assert_array_equal(epochs.events, epochs6.events)
    assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
Example #31
def generate_virtual_epoch(epochs: mne.Epochs,
                           W: np.ndarray,
                           frequency_mean: float = 10,
                           frequency_std: float = 0.2,
                           noise_phase_level: float = 0.005,
                           noise_amplitude_level: float = 0.1) -> mne.Epochs:
    """
    Generate epochs with simulated data using Kuramoto oscillators.

    Arguments:
        epochs: mne.Epochs
          Epochs object to get epoch info structure
        W: np.ndarray
          Coupling matrix between the oscillators
        frequency_mean: float
          Mean of the normal distribution for oscillator frequencies
        frequency_std: float
          Standard deviation of the normal distribution for oscillator frequencies
        noise_phase_level: float
          Amount of noise at the phase level
        noise_amplitude_level: float
          Amount of noise at the amplitude level

    Returns:
        mne.Epochs
          New Epochs object with the simulated data
    """

    n_epo, n_chan, n_samp = epochs.get_data().shape
    sfreq = epochs.info['sfreq']
    N = int(n_chan / 2)

    Nt = n_samp * n_epo
    tmax = n_samp / sfreq * n_epo  # s
    tv = np.linspace(0., tmax, Nt)

    freq = frequency_mean + frequency_std * np.random.randn(n_chan)
    omega = 2. * np.pi * freq

    def fp(t, p):
        p = np.atleast_2d(p)
        coupling = np.squeeze((np.sin(p) * np.matmul(W,
                                                     np.cos(p).T).T) -
                              (np.cos(p) * np.matmul(W,
                                                     np.sin(p).T).T))
        dotp = omega - coupling + noise_phase_level * np.random.randn(
            n_chan) / n_samp
        return dotp

    p0 = 2 * np.pi * np.block([
        np.zeros(N) + np.random.rand(N) + 0.5,
        np.zeros(N) + np.random.rand(N) + 0.5
    ])  # initialization
    ans = solve_ivp(fun=fp, t_span=(tv[0], tv[-1]), y0=p0, t_eval=tv)
    phi = ans['y'].T % (2 * np.pi)

    eeg = np.sin(phi) + noise_amplitude_level * np.random.randn(*phi.shape)

    simulation = epochs.copy()
    simulation._data = np.transpose(np.reshape(eeg.T, [n_chan, n_epo, n_samp]),
                                    (1, 0, 2))

    return simulation
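
A brief usage sketch for the generator above (illustrative only, not part of the original source). It assumes epochs is an existing mne.Epochs object with an even number of channels, and that numpy and scipy.integrate.solve_ivp are imported at module level, since the function body relies on both.

import numpy as np

# Hypothetical all-to-all coupling between oscillators, with no self-coupling.
n_chan = len(epochs.ch_names)
W = 0.5 * (np.ones((n_chan, n_chan)) - np.eye(n_chan))
simulated = generate_virtual_epoch(epochs, W, frequency_mean=10., frequency_std=0.2)
print(simulated.get_data().shape)  # same (n_epochs, n_channels, n_samples) as the input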
Example #32
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers."""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    _assert_cov(cov, read_cov(cov_km_fname))

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert_true(err < 0.05, msg=err)

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
              baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    assert_raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    assert_raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='factor_analysis')

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    assert_equal(len(w), 2)

    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
                    proj=True, reject=reject, preload=True)
    with warnings.catch_warnings(record=True):  # samples
        compute_covariance(epochs)

        # projs checking
        compute_covariance(epochs, projs=[])
    assert_raises(TypeError, compute_covariance, epochs, projs='foo')
    assert_raises(TypeError, compute_covariance, epochs, projs=['foo'])
Example #33
events = find_events(raw, shortest_event=0, stim_channel='STI 014')

picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
                picks=picks,
                baseline=None,
                preload=True,
                add_eeg_ref=False)
epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

from sklearn.lda import LDA  # noqa
from sklearn.cross_validation import ShuffleSplit  # noqa

# Assemble a classifier
svc = LDA()
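
The snippet above stops after building the classifier; here is a hedged sketch of how the decoding could continue, using the current scikit-learn module paths (sklearn.discriminant_analysis, sklearn.model_selection) in place of the deprecated sklearn.lda and sklearn.cross_validation imports, plus mne.decoding.CSP for spatial filtering. epochs_train and labels come from the snippet; the parameter values are illustrative.

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline
from mne.decoding import CSP

# Cross-validated CSP + LDA decoding on the 1-2 s training window
X_train = epochs_train.get_data()
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
clf = Pipeline([('CSP', CSP(n_components=4, reg=None, log=True)),
                ('LDA', LinearDiscriminantAnalysis())])
scores = cross_val_score(clf, X_train, labels, cv=cv, n_jobs=1)
print('Mean CV accuracy: %0.3f' % np.mean(scores))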
Example #34
HI_FREQ = 32.
WEIGHT_PATH = "weights"
CONFIDENCE = 0.66


### script start
# _compX, _compY = epoch_comp(prep_comp(load_comp(True), comp_channel_map3, GOODS, l_freq=LO_FREQ, h_freq=HI_FREQ), CLASSES, resample=RESAMPLE, trange=T_RANGE)
_pilotX, _pilotY = epoch_pilot(load_pilot('data/rivet/raw/pilot2/BCI_imaginedmoves_3class_7-4-21.vhdr'), CLASSES, GOODS, resample=RESAMPLE, trange=T_RANGE, l_freq=LO_FREQ, h_freq=HI_FREQ)

from mne import pick_types, Epochs, events_from_annotations, create_info
from mne.io import RawArray
from integration import stream_channels, GOODS
debug_pilot = load_pilot('data/rivet/raw/pilot2/BCI_imaginedmoves_3class_7-4-21.vhdr')
events, event_id = events_from_annotations(debug_pilot, event_id={'Stimulus/left': 0, 'Stimulus/right': 1, 'Stimulus/feet': 2})
picks = pick_types(debug_pilot.info, meg=False, eeg=True, stim=False, eog=False)
epochs = Epochs(debug_pilot, events, event_id, proj=False, picks=picks, baseline=None, preload=True, verbose=False, tmin=-1.5, tmax=2.5)
debug_data = epochs.get_data()
debug_data = debug_data[:, :, :-1]

# print(_pilotX[0].shape)
# print(debug_data[0].shape)
# print(debug_data[0])
# print()

stream_info = create_info(debug_pilot.ch_names[:-3], 500, 'eeg')
signal = debug_data[4]
raw = RawArray(data=signal, info=stream_info)
raw = raw.filter(LO_FREQ, HI_FREQ, method='fir', fir_design='firwin', phase='zero')
raw = raw.crop(tmin=2.)
raw = raw.resample(125)
realtime = raw.get_data(picks=sorted(GOODS)) * 1000
Example #35
def test_add_reference():
    """Test adding a reference."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    pytest.raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag fif's, the reference electrode location is placed in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    pytest.raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    with pytest.warns(RuntimeWarning, match='reference channels are ignored'):
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='reference channels are ignored'):
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw = read_raw_fif(fif_fname, preload=False)
    with pytest.raises(RuntimeError, match='loaded'):
        add_reference_channels(raw, ['Ref'])
    raw.load_data()
    with pytest.raises(ValueError, match='Channel.*already.*'):
        add_reference_channels(raw, raw.ch_names[:1])
    with pytest.raises(TypeError, match='instance of'):
        add_reference_channels(raw, 1)
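
In analysis scripts the same function is typically used to append a flat, zero-filled channel for an electrode that served as the online reference, after which the data can be re-referenced offline. A minimal sketch assuming a preloaded Raw with EEG channels (the file name is a placeholder):

import mne

# placeholder file name; any preloaded Raw with EEG channels will do
raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
# append zero-filled channels for the (unrecorded) mastoid electrodes
raw = mne.add_reference_channels(raw, ['M1', 'M2'], copy=True)
# re-reference the EEG to the mean of the two mastoid channels
raw.set_eeg_reference(ref_channels=['M1', 'M2'])
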
Example #36
0
def test_ica_full_data_recovery():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, copy=False)
    raw.load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[:10]
    with warnings.catch_warnings(record=True):  # bad proj
        epochs = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True)
    evoked = epochs.average()
    n_channels = 5
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    for method in ['fastica']:
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method,
                      max_iter=1)
            with warnings.catch_warnings(record=True):
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw.copy(), exclude=[])
            if ok:
                assert_allclose(data[:n_channels],
                                raw2._data[:n_channels],
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert_true(np.max(diff) > 1e-14)

            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components)
            with warnings.catch_warnings(record=True):
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs.copy(), exclude=[])
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)

            evoked2 = ica.apply(evoked.copy(), exclude=[])
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
    assert_raises(ValueError, ICA, method='pizza-decomposision')
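
The property asserted above, that ica.apply with nothing excluded reproduces the input whenever n_pca_components equals the number of fitted channels, can be verified directly on synthetic data. A minimal sketch using the older ICA constructor signature these examples rely on (all names and numbers below are illustrative, not taken from the test):

import numpy as np
import mne
from mne.preprocessing import ICA

rng = np.random.RandomState(0)
info = mne.create_info(['EEG %02d' % i for i in range(5)], 1000., 'eeg')
raw = mne.io.RawArray(rng.randn(5, 2000), info)

# n_pca_components == number of channels -> lossless reconstruction expected
ica = ICA(n_components=2, max_pca_components=5, n_pca_components=5,
          random_state=0)
ica.fit(raw)
raw_back = ica.apply(raw.copy(), exclude=[])  # nothing rejected
np.testing.assert_allclose(raw.get_data(), raw_back.get_data(),
                           rtol=1e-10, atol=1e-12)
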
Example #37
0
def test_ica_core(method):
    """Test ICA on raw and epochs."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()

    # XXX: the None cases helped reveal bugs but are time-consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future debugging, add more cases here
    max_pca_components = [3]
    picks_ = [picks]
    methods = [method]
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # test init catchers
    pytest.raises(ValueError, ICA, n_components=3, max_pca_components=2)
    pytest.raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method,
                  max_iter=1)
        pytest.raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        pytest.raises(RuntimeError, ica.get_sources, raw)
        pytest.raises(RuntimeError, ica.get_sources, epochs)

        # Test error upon empty epochs fitting
        with pytest.raises(RuntimeError, match='none were found'):
            ica.fit(epochs[0:0])

        # test decomposition
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        repr(ica)  # to test repr
        assert ('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        raw_sources = ica.get_sources(raw)
        # test for #3804
        assert_equal(raw_sources._filenames, [None])
        print(raw_sources)

        # test for gh-6271 (scaling of ICA traces)
        fig = raw_sources.plot()
        assert len(fig.axes[0].lines) in (4, 5, 6)
        for line in fig.axes[0].lines:
            y = line.get_ydata()
            if len(y) > 2:  # actual data, not markers
                assert np.ptp(y) < 15
        plt.close('all')

        sources = raw_sources[:, :][0]
        assert (sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        pytest.raises(RuntimeError, ica.apply, raw3, include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method)
        with pytest.warns(None):  # sometimes warns
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert (sources.shape[1] == ica.n_components_)

        pytest.raises(ValueError,
                      ica.score_sources,
                      epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        pytest.raises(RuntimeError, ica.apply, epochs3, include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica.pre_whitener_.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica.pre_whitener_)

    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    pytest.raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    pytest.raises(ValueError, ica.get_sources, offender)
    pytest.raises(TypeError, ica.fit, offender)
    pytest.raises(TypeError, ica.apply, offender)
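
One detail the parameter loop above exercises is that n_components accepts either an integer (an explicit number of components) or a float in (0, 1], interpreted as a cumulative explained-variance threshold for selecting components. A short sketch of both spellings (fitting is left commented out because it needs a Raw or Epochs instance):

from mne.preprocessing import ICA

ica_int = ICA(n_components=2, random_state=0)    # keep exactly two components
ica_var = ICA(n_components=0.95, random_state=0) # keep enough components to
                                                 # explain 95% of the variance
# both are fitted the same way, e.g.
# ica_int.fit(raw, picks=picks)
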
Example #38
0
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

raw.filter(7., 35., method='iir', picks=picks)

epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
                picks=picks,
                baseline=None,
                preload=True,
                add_eeg_ref=False,
                verbose=False)
labels = epochs.events[:, -1] - 2

# get epochs
epochs_data = epochs.get_data()

###############################################################################
# Pairwise distance based permutation test
###############################################################################

covest = Covariances()
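
The script fragment stops right after constructing the pyriemann Covariances() estimator, while the banner announces a pairwise-distance permutation test. A rough sketch of what such a test could look like, using only fit_transform from pyriemann plus a hand-rolled permutation loop so that no further pyriemann API is assumed (labels and epochs_data come from the script above, and two classes are assumed):

import numpy as np

# one spatial covariance matrix per epoch: shape (n_epochs, n_channels, n_channels)
covmats = covest.fit_transform(epochs_data)

def between_class_distance(covs, y):
    """Frobenius distance between the two class-mean covariance matrices."""
    means = [covs[y == c].mean(axis=0) for c in np.unique(y)]
    return np.linalg.norm(means[0] - means[1])

rng = np.random.RandomState(42)
observed = between_class_distance(covmats, labels)
null = np.array([between_class_distance(covmats, rng.permutation(labels))
                 for _ in range(500)])
p_value = (np.sum(null >= observed) + 1.) / (len(null) + 1.)
print('permutation p-value: %.3f' % p_value)
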
Example #39
0
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs."""
    raw = io.read_raw_fif(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    picks = picks[2:12:3]  # subselect channels -> disable proj!
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True,
                    proj=False)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
    y = epochs.events[:, -1]

    # Init
    pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False)
    for reg in ['foo', -0.1, 1.1]:
        csp = CSP(reg=reg, norm_trace=False)
        pytest.raises(ValueError, csp.fit, epochs_data, epochs.events[:, -1])
    for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
        CSP(reg=reg, norm_trace=False)
    for cov_est in ['foo', None]:
        pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False)
    pytest.raises(ValueError, CSP, norm_trace='foo')
    for cov_est in ['concat', 'epoch']:
        CSP(cov_est=cov_est, norm_trace=False)

    n_components = 3
    # Fit
    for norm_trace in [True, False]:
        csp = CSP(n_components=n_components, norm_trace=norm_trace)
        csp.fit(epochs_data, epochs.events[:, -1])

    assert_equal(len(csp.mean_), n_components)
    assert_equal(len(csp.std_), n_components)

    # Transform
    X = csp.fit_transform(epochs_data, y)
    sources = csp.transform(epochs_data)
    assert (sources.shape[1] == n_components)
    assert (csp.filters_.shape == (n_channels, n_channels))
    assert (csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(sources, X)

    # Test data exception
    pytest.raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    pytest.raises(ValueError, csp.fit, epochs, y)
    pytest.raises(ValueError, csp.transform, epochs)

    # Test plots
    epochs.pick_types(meg='mag')
    cmap = ('RdBu', True)
    components = np.arange(n_components)
    for plot in (csp.plot_patterns, csp.plot_filters):
        plot(epochs.info, components=components, res=12, show=False, cmap=cmap)

    # Test with more than 2 classes
    epochs = Epochs(raw,
                    events,
                    tmin=tmin,
                    tmax=tmax,
                    picks=picks,
                    event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
                    baseline=(None, 0),
                    proj=False,
                    preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    for cov_est in ['concat', 'epoch']:
        csp = CSP(n_components=n_components, cov_est=cov_est, norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
        assert_equal(len(csp._classes), 4)
        assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
        assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])

    # Test average power transform
    n_components = 2
    assert (csp.transform_into == 'average_power')
    feature_shape = [len(epochs_data), n_components]
    X_trans = dict()
    for log in (None, True, False):
        csp = CSP(n_components=n_components, log=log, norm_trace=False)
        assert (csp.log is log)
        Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
        assert_array_equal(Xt.shape, feature_shape)
        X_trans[str(log)] = Xt
    # log=None => log=True
    assert_array_almost_equal(X_trans['None'], X_trans['True'])
    # Different normalization return different transform
    assert (np.sum((X_trans['True'] - X_trans['False'])**2) > 1.)
    # Check wrong inputs
    pytest.raises(ValueError, CSP, transform_into='average_power', log='foo')

    # Test csp space transform
    csp = CSP(transform_into='csp_space', norm_trace=False)
    assert (csp.transform_into == 'csp_space')
    for log in ('foo', True, False):
        pytest.raises(ValueError,
                      CSP,
                      transform_into='csp_space',
                      log=log,
                      norm_trace=False)
    n_components = 2
    csp = CSP(n_components=n_components,
              transform_into='csp_space',
              norm_trace=False)
    Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
    feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
    assert_array_equal(Xt.shape, feature_shape)

    # Check mixing matrix on simulated data
    y = np.array([100] * 50 + [1] * 50)
    X, A = simulate_data(y)

    for cov_est in ['concat', 'epoch']:
        # fit csp
        csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False)
        csp.fit(X, y)

        # check that the first pattern matches the mixing matrix
        # (the sign might change)
        corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1])
        assert np.abs(corr) > 0.99

        # check output
        out = csp.transform(X)
        corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
        assert np.abs(corr) > 0.95
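
In application code CSP is usually a supervised feature extractor in front of a classifier, with the average-power transform checked above providing the classifier's features. A minimal, hedged sketch with scikit-learn (the pipeline details are illustrative and not part of the test):

from sklearn.pipeline import make_pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from mne.decoding import CSP

# epochs_data: (n_epochs, n_channels, n_times); y: one class label per epoch
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
                    LinearDiscriminantAnalysis())
scores = cross_val_score(clf, epochs_data, y, cv=5)
print('CSP + LDA accuracy: %.2f' % scores.mean())
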
Example #40
0
def test_io_set_raw(fnames, tmpdir):
    """Test importing EEGLAB .set files."""
    tmpdir = str(tmpdir)
    raw_fname, raw_fname_onefile = fnames
    with pytest.warns(RuntimeWarning) as w:
        _test_raw_reader(read_raw_eeglab, input_fname=raw_fname,
                         montage=montage)
        _test_raw_reader(read_raw_eeglab, input_fname=raw_fname_onefile,
                         montage=montage)
    for want in ('Events like', 'consist entirely', 'could not be mapped',
                 'string preload is not supported'):
        assert (any(want in str(ww.message) for ww in w))
    with pytest.warns(RuntimeWarning) as w:
        # test finding events in continuous data
        event_id = {'rt': 1, 'square': 2}
        raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                               event_id=event_id, preload=True)
        raw1 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                               event_id=event_id, preload=False)
        raw2 = read_raw_eeglab(input_fname=raw_fname_onefile, montage=montage,
                               event_id=event_id)
        raw3 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                               event_id=event_id)
        raw4 = read_raw_eeglab(input_fname=raw_fname, montage=montage)
        Epochs(raw0, find_events(raw0), event_id)
        epochs = Epochs(raw1, find_events(raw1), event_id)
        assert_equal(len(find_events(raw4)), 0)  # no events without event_id
        assert_equal(epochs["square"].average().nave, 80)  # 80 with
        # assert_array_equal only compares two arrays, so check pairwise
        for other in (raw1, raw2, raw3):
            assert_array_equal(raw0[:][0], other[:][0])
            assert_array_equal(raw0[:][-1], other[:][-1])
        assert_equal(len(w), 4)
        # 1 for preload=False / str with fname_onefile, 3 for dropped events
        raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto',
                    phase='zero')  # test that preloading works

    # test that using uint16_codec does not break stuff
    raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                           event_id=event_id, preload=False,
                           uint16_codec='ascii')

    # test old EEGLAB version event import (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    for event in eeg.event:  # old version allows integer events
        event.type = 1
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)
    eeg.event = eeg.event[0]  # single event
    eeg.event.latency = float(eeg.event.latency) - .1  # test rounding
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)

    # test reading file with one event (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    one_event_fname = op.join(tmpdir, 'test_one_event.set')
    io.savemat(one_event_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate,
                'nbchan': eeg.nbchan, 'data': 'test_one_event.fdt',
                'epoch': eeg.epoch, 'event': eeg.event[0],
                'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    one_event_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    test_raw = read_raw_eeglab(input_fname=one_event_fname, montage=montage,
                               event_id=event_id, preload=True)

    # test that sample indices are read python-wise (zero-based)
    assert find_events(test_raw)[0, 0] == round(eeg.event[0].latency) - 1

    # test negative event latencies
    negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
    evnts = deepcopy(eeg.event[0])
    evnts.latency = 0
    io.savemat(negative_latency_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate,
                'nbchan': eeg.nbchan, 'data': 'test_one_event.fdt',
                'epoch': eeg.epoch, 'event': evnts,
                'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    negative_latency_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
        read_raw_eeglab(input_fname=negative_latency_fname, preload=True,
                        event_id=event_id, montage=montage)

    # test overlapping events
    overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
    io.savemat(overlap_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate,
                'nbchan': eeg.nbchan, 'data': 'test_overlap_event.fdt',
                'epoch': eeg.epoch, 'event': [eeg.event[0], eeg.event[0]],
                'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    overlap_fname.replace('.set', '.fdt'))
    event_id = {'rt': 1, 'square': 2}
    with pytest.warns(RuntimeWarning, match='will be dropped'):
        raw = read_raw_eeglab(input_fname=overlap_fname,
                              montage=montage, event_id=event_id,
                              preload=True)
    events_stimchan = find_events(raw)
    events_read_events_eeglab = read_events_eeglab(overlap_fname, event_id)
    assert (len(events_stimchan) == 1)
    assert (len(events_read_events_eeglab) == 2)

    # test reading file with one channel
    one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
    io.savemat(one_chan_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate,
                'nbchan': 1, 'data': np.random.random((1, 3)),
                'epoch': eeg.epoch, 'event': eeg.epoch,
                'chanlocs': {'labels': 'E1', 'Y': -6.6069,
                             'X': 6.3023, 'Z': -2.9423},
                'times': eeg.times[:3], 'pnts': 3}},
               appendmat=False, oned_as='row')
    with pytest.warns(None) as w:
        read_raw_eeglab(input_fname=one_chan_fname, preload=True)
    # no warning for 'no events found'
    assert len(w) == 0

    # test reading file with 3 channels - one without position information
    # first, create chanlocs structured array
    ch_names = ['F3', 'unknown', 'FPz']
    x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
    dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
    chanlocs = np.zeros((3,), dtype=dt)
    for ind, vals in enumerate(zip(ch_names, x, y, z)):
        for fld in range(4):
            chanlocs[ind][dt[fld][0]] = vals[fld]

    if LooseVersion(np.__version__) == '1.14.0':
        # There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
        # this write to fail!
        raise SkipTest('Need to fix bug in NumPy 1.14.0!')

    # save set file
    one_chanpos_fname = op.join(tmpdir, 'test_chanpos.set')
    io.savemat(one_chanpos_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate,
                'nbchan': 3, 'data': np.random.random((3, 3)),
                'epoch': eeg.epoch, 'event': eeg.epoch,
                'chanlocs': chanlocs, 'times': eeg.times[:3], 'pnts': 3}},
               appendmat=False, oned_as='row')
    # load it
    with pytest.warns(RuntimeWarning, match='did not have a position'):
        raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
    # position should be present for first two channels
    for i in range(2):
        assert_array_equal(raw.info['chs'][i]['loc'][:3],
                           np.array([-chanlocs[i]['Y'],
                                     chanlocs[i]['X'],
                                     chanlocs[i]['Z']]))
    # position of the last channel should be zero
    assert_array_equal(raw.info['chs'][-1]['loc'][:3], [np.nan] * 3)

    # test reading channel names from set and positions from montage
    with pytest.warns(RuntimeWarning, match='did not have a position'):
        raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True,
                              montage=montage)

    # when montage was passed - channel positions should be taken from there
    correct_pos = [[-0.56705965, 0.67706631, 0.46906776], [np.nan] * 3,
                   [0., 0.99977915, -0.02101571]]
    for ch_ind in range(3):
        assert_array_almost_equal(raw.info['chs'][ch_ind]['loc'][:3],
                                  np.array(correct_pos[ch_ind]))

    # test reading channel names but not positions when there is no X (only Z)
    # field in the EEG.chanlocs structure
    nopos_chanlocs = chanlocs[['labels', 'Z']]
    nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
    io.savemat(nopos_fname, {'EEG':
               {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3,
                'data': np.random.random((3, 2)), 'epoch': eeg.epoch,
                'event': eeg.epoch, 'chanlocs': nopos_chanlocs,
                'times': eeg.times[:2], 'pnts': 2}},
               appendmat=False, oned_as='row')
    # load the file
    raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
    # test that channel names have been loaded but not channel positions
    for i in range(3):
        assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
        assert_array_equal(raw.info['chs'][i]['loc'][:3],
                           np.array([np.nan, np.nan, np.nan]))
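
Stripped of the test scaffolding, the reader is driven with a file name, an optional montage and, in this older API, an event_id mapping for string event types; events are then recovered from the synthesized stim channel. A short sketch reusing the same call pattern (the .set path is a placeholder and montage stands for whatever montage object is in scope, as in the test above):

from mne import find_events
from mne.io import read_raw_eeglab

event_id = {'rt': 1, 'square': 2}  # map EEGLAB string event types to integers
raw = read_raw_eeglab(input_fname='subject01.set', montage=montage,
                      event_id=event_id, preload=True)
events = find_events(raw)
print('found %d events' % len(events))
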
Example #41
0
# NOTE: the opening lines of this example were lost; the call below is most
# likely the tail of an stc = simulate_sparse_stc(...) call, with src, times,
# n_dipoles and data_fun defined in the missing preamble.
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
                          data_fun=data_fun,
                          random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
fig.show()

##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw,
                       stc,
                       trans_fname,
                       src,
                       bem_fname,
                       cov='simple',
                       iir_filter=[0.2, -0.2, 0.04],
                       ecg=True,
                       blink=True,
                       n_jobs=1,
                       verbose=True)
raw_sim.plot()

##############################################################################
# Plot evoked data
events = find_events(raw_sim)  # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical')  # quick calc
evoked = epochs.average()
evoked.plot_white(cov)
Example #42
0
def test_compute_covariance_auto_reg(rank):
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)
    epochs = epochs.crop(None, 0)[:5]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs,
                              method='auto',
                              method_params=method_params,
                              return_estimators=True,
                              rank=rank)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical' and rank == 'full'):

            assert not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask])

            # but the rest is the same
            assert_allclose(cov_a['data'][off_diag_mask],
                            cov_b['data'][off_diag_mask],
                            rtol=1e-12)

        else:
            # and here we have shrinkage everywhere.
            assert not np.any(
                cov_a['data'][diag_mask] == cov_b['data'][diag_mask])

    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order

    methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
    if rank == 'full':
        methods.extend(['factor_analysis', 'pca'])
    with catch_logging() as log:
        cov3 = compute_covariance(epochs,
                                  method=methods,
                                  method_params=method_params,
                                  projs=None,
                                  return_estimators=True,
                                  rank=rank,
                                  verbose=True)
    log = log.getvalue().split('\n')
    if rank is None:
        assert '    Setting small MAG eigenvalues to zero (without PCA)' in log
        assert 'Reducing data rank from 10 -> 7' in log
    else:
        assert 'Reducing' not in log
    method_names = [cov['method'] for cov in cov3]
    best_bounds = [-45, -35]
    bounds = [-55, -45] if rank == 'full' else best_bounds
    for method in set(methods) - {'empirical', 'shrunk'}:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert bounds[0] < this_lik < bounds[1]
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert best_bounds[0] < this_lik < best_bounds[1]
    this_lik = cov3[method_names.index('empirical')]['loglik']
    bounds = [-110, -100] if rank == 'full' else best_bounds
    assert bounds[0] < this_lik < bounds[1]

    assert_equal({c['method'] for c in cov3}, set(methods))

    cov4 = compute_covariance(epochs,
                              method=methods,
                              method_params=method_params,
                              projs=None,
                              return_estimators=False,
                              rank=rank)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  method='shrunk',
                  scalings=dict(misc=123))
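
The behavior under test, method='auto' (or an explicit list of methods) combined with return_estimators=True returning the fitted estimators ordered by cross-validated log-likelihood, maps onto a short usage pattern. A hedged sketch assuming preloaded baseline epochs like the ones built above:

from mne import compute_covariance

covs = compute_covariance(epochs, tmax=0., method='auto',
                          return_estimators=True)
for cov in covs:
    print('%-15s loglik=%.1f' % (cov['method'], cov['loglik']))
best_cov = covs[0]  # estimators come back in descending log-likelihood order
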
Example #43
0
def test_ica_full_data_recovery(method):
    """Test recovery of full data when no source is rejected."""
    # Most basic recovery
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[:10]
    with pytest.warns(RuntimeWarning, match='projection'):
        epochs = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True)
    evoked = epochs.average()
    n_channels = 5
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    methods = [method]
    for method in methods:
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components,
                      random_state=0,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method,
                      max_iter=1)
            with pytest.warns(UserWarning, match=None):  # sometimes warns
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw.copy(), exclude=[])
            if ok:
                assert_allclose(data[:n_channels],
                                raw2._data[:n_channels],
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert (np.max(diff) > 1e-14)

            ica = ICA(n_components=n_components,
                      method=method,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      random_state=0)
            with pytest.warns(None):  # sometimes warns
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs.copy(), exclude=[])
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert (np.max(diff) > 1e-14)

            evoked2 = ica.apply(evoked.copy(), exclude=[])
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert (np.max(diff) > 1e-14)
    pytest.raises(ValueError, ICA, method='pizza-decomposision')
Example #44
0
def test_ica_additional():
    """Test additional ICA functionality"""
    tempdir = _TempDir()
    stop2 = 500
    raw = Raw(raw_fname).crop(1.5, stop, copy=False)
    raw.load_data()
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None,
                  random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info,
                        meg=True,
                        stim=False,
                        ecg=False,
                        eog=True,
                        exclude='bads')
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(
                ica.exclude ==
                [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.filter(4, 20)
            assert_equal(type(ica_raw.info['lowpass']), float)
            assert_equal(type(ica_raw.info['highpass']), float)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.notch_filter([10])
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', '_pre_whitener'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      epochs.average(),
                      method='ctps')
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = Raw(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)
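
The scoring calls exercised here (find_bads_ecg, find_bads_eog) are normally followed by marking the detected components for exclusion and applying the ICA. A compact, hedged sketch assuming a fitted ICA and a Raw recording that contains an 'EOG 061' channel and an ECG-like signal, as in these tests:

# score components against EOG and ECG and mark the offenders
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='EOG 061')
ecg_inds, ecg_scores = ica.find_bads_ecg(raw, method='ctps')

ica.exclude = sorted(set(eog_inds) | set(ecg_inds))
raw_clean = ica.apply(raw.copy())  # excluded components are removed here
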
Example #45
0
def test_ica_additional(method):
    """Test additional ICA functionality."""
    _skip_check_picard(method)

    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    raw.del_proj()  # avoid warnings
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[1::2]
    epochs = Epochs(raw,
                    events,
                    None,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True,
                    proj=False)
    epochs.decimate(3, verbose='error')
    assert len(epochs) == 4

    # test if n_components=None works
    ica = ICA(n_components=None,
              max_pca_components=None,
              n_pca_components=None,
              random_state=0,
              method=method,
              max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    # for testing eog functionality
    picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)])
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)
    del picks2

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4,
              method=method)
    assert (ica.info is None)
    with pytest.warns(RuntimeWarning, match='normalize_proj'):
        ica.fit(raw, picks[:5])
    assert (isinstance(ica.info, Info))
    assert (ica.n_components_ < 5)

    ica = ICA(n_components=3,
              max_pca_components=4,
              method=method,
              n_pca_components=4,
              random_state=0)
    pytest.raises(RuntimeError, ica.save, '')

    ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # check passing a ch_name to find_bads_ecg
    with pytest.warns(RuntimeWarning, match='longer'):
        _, scores_1 = ica.find_bads_ecg(raw)
        _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
    assert scores_1[0] != scores_2[0]

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    with pytest.raises(RuntimeError, match='No component detected'):
        corrmap(
            [ica, ica2],
            (0, 0),
            threshold=2,
            plot=False,
            show=False,
        )
    corrmap([ica, ica2], (0, 0), threshold=0.5, plot=False, show=False)
    assert (ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert (0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s')

    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert (ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # No match
    bad_ica = ica2.copy()
    bad_ica.mixing_matrix_[:] = 0.
    with pytest.warns(RuntimeWarning, match='divide'):
        with catch_logging() as log:
            corrmap([ica, bad_ica], (0, 0),
                    threshold=0.5,
                    plot=False,
                    show=False,
                    verbose=True)
    log = log.getvalue()
    assert 'No maps selected' in log

    # make sure a single threshold in a list works
    corrmap([ica, ica3],
            template,
            threshold=[0.5],
            label='blinks',
            plot=True,
            ch_type="mag")

    ica_different_channels = ICA(n_components=2,
                                 random_state=0).fit(raw, picks=[2, 3, 4, 5])
    pytest.raises(ValueError, corrmap, [ica_different_channels, ica], (0, 0))

    # test warnings on bad filenames
    ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        ica.save(ica_badname)
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        read_ica(ica_badname)

    # test decim
    ica = ICA(n_components=3,
              max_pca_components=4,
              n_pca_components=4,
              method=method,
              max_iter=1)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=picks[:5], decim=3)
    assert raw_._data.shape[1] == n_samples

    # test expl var
    ica = ICA(n_components=1.0,
              max_pca_components=4,
              n_pca_components=4,
              method=method,
              max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=None, decim=3)
    assert (ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert (np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    pytest.raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4,
                  method=method,
                  max_iter=1)
        with pytest.warns(None):  # ICA does not converge
            ica.fit(raw, picks=picks[:10], start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert (ica.mixing_matrix_.shape == (2, 2))
        assert (ica.unmixing_matrix_.shape == (2, 2))
        assert (ica.pca_components_.shape == (4, 10))
        assert (sources.shape[1] == ica.n_components_)

        for exclude in [[], [0], np.array([1, 2, 3])]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert (list(ica.exclude) == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.apply(raw)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert (ica.exclude == [
                ica_raw.ch_names.index(e) for e in ica_raw.info['bads']
            ])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, fir_design='firwin2')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert ((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
        assert ((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert (ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ pre_whitener_')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', 'pre_whitener_'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert (ica.ch_names == ica_read.ch_names)
        assert (isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew)
    # check exception handling
    pytest.raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)

    # Make sure detect_artifacts marks the right components.
    # For int criterion, the doc says "E.g. range(2) would return the two
    # sources with the highest score". Assert that's what it does.
    # Only test for skew, since it's always the same code.
    ica.exclude = []
    ica.detect_artifacts(raw,
                         start_find=0,
                         stop_find=50,
                         ecg_ch=None,
                         eog_ch=None,
                         skew_criterion=0,
                         var_criterion=None,
                         kurt_criterion=None)
    assert np.abs(scores[ica.exclude]) == np.max(np.abs(scores))

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(epochs, method='ctps')

    assert_equal(len(scores), ica.n_components_)
    pytest.raises(ValueError,
                  ica.find_bads_ecg,
                  epochs.average(),
                  method='ctps')
    pytest.raises(ValueError, ica.find_bads_ecg, raw, method='crazy-coupling')

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert (isinstance(scores, list))
    assert_equal(len(scores[0]), ica.n_components_)

    idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(evoked, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with pytest.warns(RuntimeWarning, match='longer'):
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])
    assert (ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])
    assert (eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert (ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert (ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    assert (ica.n_components_ == ica_epochs.get_data().shape[1])
    assert (ica_epochs._raw is None)
    assert (ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert (ncomps_ == expected)

    ica = ICA(method=method)
    with pytest.warns(None):  # sometimes does not converge
        ica.fit(raw, picks=picks[:5])
    with pytest.warns(RuntimeWarning, match='longer'):
        ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    pytest.raises(RuntimeError, ica.find_bads_eog, raw)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(RuntimeError, ica.find_bads_ecg, raw)
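# Quick-reference sketch (not part of the test above): the core ICA workflow
# the test exercises -- fit, score EOG artifacts, mark components, and apply.
# ``raw`` and ``picks`` are assumed to be defined as in the test setup.
ica_sketch = ICA(n_components=15, method='fastica', random_state=0)
ica_sketch.fit(raw, picks=picks)
eog_inds, eog_scores = ica_sketch.find_bads_eog(raw)
ica_sketch.exclude = eog_inds
raw_clean = ica_sketch.apply(raw.copy())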
Example #46
0
def test_ica_core():
    """Test ICA on raw and epochs"""
    raw = Raw(raw_fname).crop(1.5, stop, copy=False)
    raw.load_data()
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    # XXX. The None cases helped reveal bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = ['fastica']
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # test init catchers
    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method,
                  max_iter=1)
        assert_raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        assert_raises(RuntimeError, ica.get_sources, raw)
        assert_raises(RuntimeError, ica.get_sources, epochs)

        # test decomposition
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
            repr(ica)  # to test repr
        assert_true('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        sources = ica.get_sources(raw)[:, :][0]
        assert_true(sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        assert_raises(ValueError, ica.apply, raw3, include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0)
        with warnings.catch_warnings(record=True):
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert_true(sources.shape[1] == ica.n_components_)

        assert_raises(ValueError,
                      ica.score_sources,
                      epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        assert_raises(ValueError, ica.apply, epochs3, include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica._pre_whitener.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica._pre_whitener)

    # test explained-variance threshold leading to an empty selection
    ica.n_components = 0.1
    assert_raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    assert_raises(ValueError, ica.get_sources, offender)
    assert_raises(ValueError, ica.fit, offender)
    assert_raises(ValueError, ica.apply, offender)
Example #47
0
def test_ica_eeg():
    """Test ICA on EEG."""
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname, preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw,
                                          99999,
                                          start=0,
                                          stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2,
                          random_state=0,
                          max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with pytest.warns(RuntimeWarning, match='MISC channel'):
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw,
                                      99999,
                                      start=0,
                                      stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    picks_all = np.concatenate([picks_meg, picks_eeg])
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2,
                          random_state=0,
                          max_iter=2,
                          method=method)
                with pytest.warns(None):
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
Example #48
0
offline_events_file = './data/record-[2012.07.06-19.06.14]-eve.fif'

# To epoching
event_id = {'13 Hz': 2, '17 Hz': 4, '21 Hz': 3, 'resting-state': 1}
# To bandpass filtering
frequencies = [13., 17., 21.]
frequency_range = 0.1

## Loading EEG data for online prediction
raw, events = openEEGFile(raw_file, events_file)

# Filtering data for online training
signal = _bandpass_filter(raw, frequencies, frequency_range)
raw = createRaw(signal, raw, filtered=True)

epochs = Epochs(raw, events, event_id, tmin=2, tmax=5, baseline=None)
labels = epochs.events[:, -1]

## Loading EEG data for offline ML model base
offline_raw, offline_events = openEEGFile(offline_raw_file,
                                          offline_events_file)

# Filtering data for offline training
filtered_offline_signal = _bandpass_filter(offline_raw, frequencies,
                                           frequency_range)
offline_raw = createRaw(filtered_offline_signal, offline_raw, filtered=True)

offline_epochs = Epochs(offline_raw,
                        offline_events,
                        event_id,
                        tmin=2,
                        tmax=5,
                        baseline=None)
Example #49
0
def test_apply_reference():
    """Test base function for rereferencing."""
    raw = read_raw_fif(fif_fname, preload=True)

    # Rereference raw data by creating a copy of original data
    reref, ref_data = _apply_reference(raw.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # The CAR reference projection should have been removed by the function
    assert (not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that data is modified in place when copy=False
    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
    assert (raw is reref)

    # Test that disabling the reference does not change anything
    reref, ref_data = _apply_reference(raw.copy(), [])
    assert_array_equal(raw._data, reref._data)

    # Test re-referencing Epochs object
    raw = read_raw_fif(fif_fname, preload=False)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    reref, ref_data = _apply_reference(epochs.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test re-referencing Evoked object
    evoked = epochs.average()
    reref, ref_data = _apply_reference(evoked.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Referencing needs data to be preloaded
    raw_np = read_raw_fif(fif_fname, preload=False)
    pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])

    # Test having inactive SSP projections that deal with channels involved
    # during re-referencing
    raw = read_raw_fif(fif_fname, preload=True)
    raw.add_proj(
        Projection(
            active=False,
            data=dict(col_names=['EEG 001', 'EEG 002'],
                      row_names=None,
                      data=np.array([[1, 1]]),
                      ncol=2,
                      nrow=1),
            desc='test',
            kind=1,
        ))
    # Projection concerns channels mentioned in projector
    with pytest.raises(RuntimeError, match='Inactive signal space'):
        _apply_reference(raw, ['EEG 001'])

    # Projection does not concern channels mentioned in projector, no error
    _apply_reference(raw, ['EEG 003'], ['EEG 004'])

    # CSD cannot be rereferenced
    with raw.info._unlock():
        raw.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD
    with pytest.raises(RuntimeError, match="Cannot set.* type 'CSD'"):
        raw.set_eeg_reference()
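# Hedged sketch of the public re-referencing API that wraps _apply_reference;
# ``raw`` is assumed to be a freshly loaded, preloaded Raw with EEG channels.
raw.set_eeg_reference(ref_channels=['EEG 001', 'EEG 002'])  # explicit channels
# ... or, alternatively, add an average-reference projector:
# raw.set_eeg_reference('average', projection=True)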
Example #50
0
def test_compute_proj_epochs(tmp_path):
    """Test SSP computation on epochs."""
    tempdir = str(tmp_path)
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       stim=False,
                       eog=False,
                       exclude=[])
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=None,
                    proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
    for p_fname in [
            proj_fname, proj_gz_fname,
            op.join(tempdir, 'test-proj.fif.gz')
    ]:
        projs2 = read_proj(p_fname)

        assert len(projs) == len(projs2)

        for p1, p2 in zip(projs, projs2):
            assert p1['desc'] == p2['desc']
            assert p1['data']['col_names'] == p2['data']['col_names']
            assert p1['active'] == p2['active']
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                mask = np.ones(p1_data.size, dtype=bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)
            if p2['explained_var']:
                assert_array_almost_equal(p1['explained_var'],
                                          p2['explained_var'])

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert nproj == 2
    assert U.shape[1] == 2

    # test that you can save them
    with epochs.info._unlock():
        epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo-ave.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert len(projs_evoked) == 2
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs,
                                n_grad=1,
                                n_mag=1,
                                n_eeg=0,
                                n_jobs=1,
                                desc_prefix='foobar')
    assert all('foobar' in x['desc'] for x in projs)
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)

    # test warnings on bad filenames
    proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-proj.fif'):
        write_proj(proj_badname, projs)
    with pytest.warns(RuntimeWarning, match='-proj.fif'):
        read_proj(proj_badname)

    # bad inputs
    fname = op.join(tempdir, 'out-proj.fif')
    with pytest.raises(TypeError, match='projs'):
        write_proj(fname, 'foo')
    with pytest.raises(TypeError, match=r'projs\[0\] must be .*'):
        write_proj(fname, ['foo'], overwrite=True)
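# Minimal sketch (not part of the test) of using SSP vectors computed from
# epochs: attach them to an Evoked and apply; ``epochs`` is assumed as above.
projs_sketch = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0)
evoked_sketch = epochs.average()
evoked_sketch.add_proj(projs_sketch)
evoked_sketch.apply_proj()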
Example #51
0
def test_set_bipolar_reference(inst_type):
    """Test bipolar referencing."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.apply_proj()

    if inst_type == 'raw':
        inst = raw
        del raw
    elif inst_type in ['epochs', 'evoked']:
        events = find_events(raw, stim_channel='STI 014')
        epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True)
        inst = epochs
        if inst_type == 'evoked':
            inst = epochs.average()
        del epochs

    ch_info = {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}
    with pytest.raises(KeyError, match='key errantly present'):
        set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info)
    ch_info.pop('extra')
    reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar',
                                  ch_info)
    assert (reref.info['custom_ref_applied'])

    # Compare result to a manual calculation
    a = inst.copy().pick_channels(['EEG 001', 'EEG 002'])
    a = a._data[..., 0, :] - a._data[..., 1, :]
    b = reref.copy().pick_channels(['bipolar'])._data[..., 0, :]
    assert_allclose(a, b)

    # Original channels should be replaced by a virtual one
    assert ('EEG 001' not in reref.ch_names)
    assert ('EEG 002' not in reref.ch_names)
    assert ('bipolar' in reref.ch_names)

    # Check channel information
    bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
    an_info = inst.info['chs'][inst.ch_names.index('EEG 001')]
    for key in bp_info:
        if key == 'coil_type':
            assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key
        elif key == 'kind':
            assert bp_info[key] == FIFF.FIFFV_EOG_CH, key
        elif key != 'ch_name':
            assert_equal(bp_info[key], an_info[key], err_msg=key)

    # Minimalist call
    reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002')
    assert ('EEG 001-EEG 002' in reref.ch_names)

    # Minimalist call with twice the same anode
    reref = set_bipolar_reference(inst, ['EEG 001', 'EEG 001', 'EEG 002'],
                                  ['EEG 002', 'EEG 003', 'EEG 003'])
    assert ('EEG 001-EEG 002' in reref.ch_names)
    assert ('EEG 001-EEG 003' in reref.ch_names)

    # Set multiple references at once
    reref = set_bipolar_reference(
        inst,
        ['EEG 001', 'EEG 003'],
        ['EEG 002', 'EEG 004'],
        ['bipolar1', 'bipolar2'],
        [{
            'kind': FIFF.FIFFV_EOG_CH
        }, {
            'kind': FIFF.FIFFV_EOG_CH
        }],
    )
    a = inst.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])
    a = np.concatenate([
        a._data[..., :1, :] - a._data[..., 1:2, :],
        a._data[..., 2:3, :] - a._data[..., 3:4, :]
    ],
                       axis=-2)
    b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data
    assert_allclose(a, b)

    # Test creating a bipolar reference that doesn't involve EEG channels:
    # it should not set the custom_ref_applied flag
    reref = set_bipolar_reference(inst,
                                  'MEG 0111',
                                  'MEG 0112',
                                  ch_info={'kind': FIFF.FIFFV_MEG_CH},
                                  verbose='error')
    assert (not reref.info['custom_ref_applied'])
    assert ('MEG 0111-MEG 0112' in reref.ch_names)

    # Test a battery of invalid inputs
    pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001',
                  ['EEG 002', 'EEG 003'], 'bipolar')
    pytest.raises(ValueError, set_bipolar_reference, inst,
                  ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
    pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001',
                  'EEG 002', ['bipolar1', 'bipolar2'])
    pytest.raises(ValueError,
                  set_bipolar_reference,
                  inst,
                  'EEG 001',
                  'EEG 002',
                  'bipolar',
                  ch_info=[{
                      'foo': 'bar'
                  }, {
                      'foo': 'bar'
                  }])
    pytest.raises(ValueError,
                  set_bipolar_reference,
                  inst,
                  'EEG 001',
                  'EEG 002',
                  ch_name='EEG 003')
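# Minimal usage sketch of the API under test; ``raw`` is assumed to be a
# preloaded Raw with the sample EEG channels.
reref_sketch = set_bipolar_reference(raw, anode='EEG 001', cathode='EEG 002',
                                     ch_name='EEG 001-EEG 002')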
Example #52
0
def test_render_report(renderer, tmpdir):
    """Test rendering -*.fif files for mne report."""
    tempdir = str(tmpdir)
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average().crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new,
                    subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'),
                open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert (op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new,
                    subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')

    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
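# Minimal sketch of the Report workflow the test covers; ``raw_fname``,
# ``subjects_dir`` and ``data_dir`` are assumed paths.
report_sketch = Report(info_fname=raw_fname, subjects_dir=subjects_dir)
report_sketch.parse_folder(data_path=data_dir, pattern='*raw.fif')
report_sketch.save('report.html', open_browser=False, overwrite=True)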
Example #53
0
def test_low_rank_cov(raw_epochs_events):
    """Test additional properties of low rank computations."""
    raw, epochs, events = raw_epochs_events
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    proj_rank = 365  # one EEG proj
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        emp_cov = compute_covariance(epochs)
    # Test equivalence with mne.cov.regularize subspace
    with pytest.raises(ValueError, match='are dependent.*must equal'):
        regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
    assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == proj_rank
    with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
        _compute_rank_int(reg_cov, info=epochs.info)
    del reg_cov
    with catch_logging() as log:
        reg_r_cov = regularize(emp_cov,
                               epochs.info,
                               proj=True,
                               rank=None,
                               verbose=True)
    log = log.getvalue()
    assert 'jointly' in log
    assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
    reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
    assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
    del reg_r_only_cov, reg_r_cov

    # test that rank=306 is same as rank='full'
    epochs_meg = epochs.copy().pick_types(meg=True)
    assert len(epochs_meg.ch_names) == 306
    epochs_meg.info.update(bads=[], projs=[])
    cov_full = compute_covariance(epochs_meg,
                                  method='oas',
                                  rank='full',
                                  verbose='error')
    assert _cov_rank(cov_full, epochs_meg.info) == 306
    with pytest.warns(RuntimeWarning, match='few samples'):
        cov_dict = compute_covariance(epochs_meg,
                                      method='oas',
                                      rank=dict(meg=306))
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    cov_dict = compute_covariance(epochs_meg,
                                  method='oas',
                                  rank=dict(meg=306),
                                  verbose='error')
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])

    # Work with just EEG data to simplify projection / rank reduction
    raw = raw.copy().pick_types(meg=False, eeg=True)
    n_proj = 2
    raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
    n_ch = len(raw.ch_names)
    rank = n_ch - n_proj - 1  # plus avg proj
    assert len(raw.info['projs']) == 3
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    assert len(raw.ch_names) == n_ch
    emp_cov = compute_covariance(epochs, rank='full', verbose='error')
    assert _cov_rank(emp_cov, epochs.info) == rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == rank
    reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_cov, epochs.info) == rank
    dia_cov = compute_covariance(epochs,
                                 rank=None,
                                 method='diagonal_fixed',
                                 verbose='error')
    assert _cov_rank(dia_cov, epochs.info) == rank
    assert_allclose(dia_cov['data'], reg_cov['data'])
    epochs.pick_channels(epochs.ch_names[:103])
    # degenerate
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='pca')
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='factor_analysis')
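# Hedged sketch of the rank argument forms exercised above (``epochs`` is
# assumed to contain the full 306 MEG channels):
cov_auto = compute_covariance(epochs, rank=None)            # infer from data/info
cov_full_rank = compute_covariance(epochs, rank='full')     # assume full rank
cov_fixed = compute_covariance(epochs, rank=dict(meg=306))  # explicit per type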
Example #54
0
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))

picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
                picks=picks,
                baseline=None,
                preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

# Define a Monte Carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
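# A plausible continuation (sketch only, not the original script): score a
# CSP + LDA pipeline with the cross-validation generator defined above.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from mne.decoding import CSP

csp = CSP(n_components=4, reg=None, log=True)
lda = LinearDiscriminantAnalysis()
clf = Pipeline([('CSP', csp), ('LDA', lda)])
cv_scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)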
Example #55
0
def test_cov_estimation_with_triggers(rank, tmpdir):
    """Test estimation from raw with triggers."""
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_km = read_cov(cov_km_fname)
    # adjust for nfree bug
    cov_km['nfree'] -= 1
    _assert_cov(cov, cov_km)

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [
        Epochs(raw,
               events,
               ev_id,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True,
               reject=reject) for ev_id in event_ids
    ]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    assert cov_km.nfree == cov.nfree
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method_params=method_params)
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method='shrunk',
                  rank=rank)

    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True),
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=False)
    ]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=[])

    # test new dict support
    epochs = Epochs(raw,
                    events,
                    dict(a=1, b=2, c=3, d=4),
                    tmin=-0.01,
                    tmax=0,
                    proj=True,
                    reject=reject,
                    preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs, projs=[])
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
Example #56
0
def test_compute_covariance_auto_reg():
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical'):

            assert_true(not np.any(
                        cov_a['data'][diag_mask] ==
                        cov_b['data'][diag_mask]))

            # but the rest is the same
            assert_array_equal(
                cov_a['data'][off_diag_mask],
                cov_b['data'][off_diag_mask])

        else:
            # and here we have shrinkage everywhere.
            assert_true(not np.any(
                        cov_a['data'][diag_mask] ==
                        cov_b['data'][diag_mask]))

            assert_true(not np.any(
                        cov_a['data'][off_diag_mask] ==
                        cov_b['data'][off_diag_mask]))

    logliks = [c['loglik'] for c in covs]
    assert_true(np.diff(logliks).max() <= 0)  # descending order

    methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'oas', 'pca',
               'shrunk', 'shrinkage']
    cov3 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=True)
    method_names = [cov['method'] for cov in cov3]
    for method in ['factor_analysis', 'ledoit_wolf', 'oas', 'pca',
                   'shrinkage']:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert -55 < this_lik < -45
    this_lik = cov3[method_names.index('empirical')]['loglik']
    assert -110 < this_lik < -100
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert -45 < this_lik < -35

    assert_equal(set([c['method'] for c in cov3]), set(methods))

    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    assert_raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
Example #57
0
def test_array_raw():
    """Test creating raw from array
    """
    import matplotlib.pyplot as plt
    # creating
    raw = read_raw_fif(fif_fname).crop(2, 5)
    data, times = raw[:, :]
    sfreq = raw.info['sfreq']
    ch_names = [(ch[4:] if 'STI' not in ch else ch)
                for ch in raw.info['ch_names']]  # change them, why not
    # del raw
    types = list()
    for ci in range(102):
        types.extend(('grad', 'grad', 'mag'))
    types.extend(['stim'] * 9)
    types.extend(['eeg'] * 60)
    # wrong length
    assert_raises(ValueError, create_info, ch_names, sfreq, types)
    # bad entry
    types.append('foo')
    assert_raises(KeyError, create_info, ch_names, sfreq, types)
    types[-1] = 'eog'
    # default type
    info = create_info(ch_names, sfreq)
    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
    # use real types
    info = create_info(ch_names, sfreq, types)
    raw2 = _test_raw_reader(RawArray,
                            test_preloading=False,
                            data=data,
                            info=info,
                            first_samp=2 * data.shape[1])
    data2, times2 = raw2[:, :]
    assert_allclose(data, data2)
    assert_allclose(times, times2)
    assert_true('RawArray' in repr(raw2))
    assert_raises(TypeError, RawArray, info, data)

    # filtering
    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
    assert_equal(len(picks), 4)
    raw_lp = raw2.copy()
    raw_lp.filter(None,
                  4.0,
                  h_trans_bandwidth=4.,
                  filter_length='auto',
                  picks=picks,
                  n_jobs=2,
                  phase='zero',
                  fir_window='hamming')
    raw_hp = raw2.copy()
    raw_hp.filter(16.0,
                  None,
                  l_trans_bandwidth=4.,
                  filter_length='auto',
                  picks=picks,
                  n_jobs=2,
                  phase='zero',
                  fir_window='hamming')
    raw_bp = raw2.copy()
    raw_bp.filter(8.0,
                  12.0,
                  l_trans_bandwidth=4.,
                  h_trans_bandwidth=4.,
                  filter_length='auto',
                  picks=picks,
                  phase='zero',
                  fir_window='hamming')
    raw_bs = raw2.copy()
    raw_bs.filter(16.0,
                  4.0,
                  l_trans_bandwidth=4.,
                  h_trans_bandwidth=4.,
                  filter_length='auto',
                  picks=picks,
                  n_jobs=2,
                  phase='zero',
                  fir_window='hamming')
    data, _ = raw2[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    sig_dec = 15
    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)

    # plotting
    raw2.plot()
    raw2.plot_psd()
    plt.close('all')

    # epoching
    events = find_events(raw2, stim_channel='STI 014')
    events[:, 2] = 1
    assert_true(len(events) > 2)
    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
    epochs.plot_drop_log()
    epochs.plot()
    evoked = epochs.average()
    evoked.plot()
    assert_equal(evoked.nave, len(events) - 1)
    plt.close('all')

    # complex data
    rng = np.random.RandomState(0)
    data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
    raw = RawArray(data, create_info(1, 1000., 'eeg'))
    assert_allclose(raw._data, data)
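# Minimal standalone sketch of building a Raw from a NumPy array, as the test
# above exercises (channel names and sampling rate here are arbitrary):
sfreq_sketch = 1000.
info_sketch = create_info(['ch1', 'ch2'], sfreq_sketch, ['eeg', 'eeg'])
raw_sketch = RawArray(np.zeros((2, int(sfreq_sketch))), info_sketch)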
Example #58
0
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None, fir_design='firwin')

# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30., fir_design='firwin')

# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=.250)

# Epoch length is 1.5 seconds
meg_epochs = Epochs(raw,
                    events,
                    tmin=0.,
                    tmax=1.500,
                    baseline=None,
                    detrend=1,
                    decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)

# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0]  # target is EMG power

# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
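# Sketch of how the pipeline above might be evaluated (the snippet stops at
# the CV definition, so cross_val_predict is an assumed continuation):
from sklearn.model_selection import cross_val_predict
y_preds = cross_val_predict(clf, X, y, cv=cv)
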
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(vis_r=4)

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')  # replace baselining with high-pass
events = read_events(event_fname)

raw.info['bads'] = ['MEG 2443']  # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                   exclude='bads')
# Epoching
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                picks=picks, baseline=None, preload=True,
                verbose=False)

# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)

###############################################################################
# Now, we estimate a set of xDAWN filters for the epochs (which contain only
# the ``vis_r`` class).

# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)

# Xdawn instance
xd = Xdawn(n_components=2, signal_cov=signal_cov)
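# Hedged sketch of the next steps with the Xdawn instance: fit on the epochs
# and apply the filters; ``apply`` returns denoised data keyed by event type.
xd.fit(epochs)
epochs_denoised = xd.apply(epochs)['vis_r']
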
def test_tfr_with_inverse_operator():
    """Test time freq with MNE inverse computation"""

    tmin, tmax, event_id = -0.2, 0.5, 1

    # Setup for reading the raw data
    raw = fiff.Raw(fname_data)
    events = find_events(raw, stim_channel='STI 014')
    inverse_operator = read_inverse_operator(fname_inv)

    raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = fiff.pick_types(raw.info,
                            meg=True,
                            eeg=False,
                            eog=True,
                            stim=False,
                            exclude='bads')

    # Load condition 1
    event_id = 1
    events3 = events[:3]  # take 3 events to keep the computation time low
    epochs = Epochs(raw,
                    events3,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    reject=dict(grad=4000e-13, eog=150e-6),
                    preload=True)

    # Compute a source estimate per frequency band
    bands = dict(alpha=[10, 10])
    label = read_label(fname_label)

    stcs = source_band_induced_power(epochs,
                                     inverse_operator,
                                     bands,
                                     n_cycles=2,
                                     use_fft=False,
                                     pca=True,
                                     label=label)

    stc = stcs['alpha']
    assert_true(len(stcs) == len(list(bands.keys())))
    assert_true(np.all(stc.data > 0))
    assert_array_almost_equal(stc.times, epochs.times)

    stcs_no_pca = source_band_induced_power(epochs,
                                            inverse_operator,
                                            bands,
                                            n_cycles=2,
                                            use_fft=False,
                                            pca=False,
                                            label=label)

    assert_array_almost_equal(stcs['alpha'].data, stcs_no_pca['alpha'].data)

    # Compute a source estimate per frequency band
    epochs = Epochs(raw,
                    events[:10],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    reject=dict(grad=4000e-13, eog=150e-6),
                    preload=True)

    frequencies = np.arange(7, 30, 2)  # define frequencies of interest
    power, phase_lock = source_induced_power(epochs,
                                             inverse_operator,
                                             frequencies,
                                             label,
                                             baseline=(-0.1, 0),
                                             baseline_mode='percent',
                                             n_cycles=2,
                                             n_jobs=1)
    assert_true(np.all(phase_lock > 0))
    assert_true(np.all(phase_lock <= 1))
    assert_true(np.max(power) > 10)