Example #1
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions
    """
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    with warnings.catch_warnings(record=True):
        raw = Raw(chpi_fif_fname, allow_maxshield=True, preload=True)
    t -= raw.first_samp / raw.info['sfreq']
    quats = _calculate_chpi_positions(raw, verbose='debug')
    trans_est, rot_est, t_est = head_pos_to_trans_rot_t(quats)
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est), 0.003)

    # degenerate conditions
    raw_no_chpi = Raw(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1., copy=False)
    with warnings.catch_warnings(record=True):  # bad pos
        with catch_logging() as log_file:
            _calculate_chpi_positions(raw_bad, verbose=True)
    # ignore HPI info header and [done] footer
    for line in log_file.getvalue().strip().split('\n')[4:-1]:
        assert_true('0/5 good' in line)
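A note on the API: this test drives the private helper _calculate_chpi_positions directly. For reference only, here is a minimal sketch of the equivalent public cHPI workflow available in recent MNE-Python releases; the file name is a placeholder and the three-step chain is an assumption about the newer public API rather than what this test exercises.

# Sketch: head-position estimation via the public cHPI API (recent MNE-Python).
# 'my_maxshield_raw.fif' is a placeholder for a recording with cHPI coils active.
import mne

raw = mne.io.read_raw_fif('my_maxshield_raw.fif', allow_maxshield='yes',
                          preload=True)
amplitudes = mne.chpi.compute_chpi_amplitudes(raw)       # per-coil sinusoid fits
locs = mne.chpi.compute_chpi_locs(raw.info, amplitudes)  # coil locations over time
head_pos = mne.chpi.compute_head_pos(raw.info, locs)     # quaternions + translations
mne.viz.plot_head_positions(head_pos, mode='traces')     # quick visual check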
Example #2
def test_compute_proj_eog():
    """Test computation of EOG SSP projectors"""
    raw = Raw(raw_fname).crop(0, 10, False)
    raw.preload_data()
    for average in [False, True]:
        n_projs_init = len(raw.info['projs'])
        projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                         bads=['MEG 2443'], average=average,
                                         avg_ref=True, no_proj=False,
                                         l_freq=None, h_freq=None,
                                         reject=None, tmax=dur_use)
        assert_true(len(projs) == (7 + n_projs_init))
        assert_true(np.abs(events.shape[0] -
                    np.sum(np.less(eog_times, dur_use))) <= 1)
        # XXX: better tests

        # This will throw a warning b/c simplefilter('always')
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                             average=average, bads=[],
                                             avg_ref=True, no_proj=False,
                                             l_freq=None, h_freq=None,
                                             tmax=dur_use)
            assert_equal(len(w), 1)
        assert_equal(projs, None)
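Outside a test, the resulting projectors are normally attached to the recording. A minimal usage sketch, assuming a FIF file that contains an EOG channel (the path is a placeholder):

# Sketch: compute EOG projectors and store them with the data.
import mne
from mne.preprocessing import compute_proj_eog

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)  # placeholder path
projs, eog_events = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1,
                                     average=True, no_proj=True)  # return only new projectors
raw.add_proj(projs)   # keep them with the recording
raw.apply_proj()      # optionally apply them to the data in place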
Example #3
def test_compute_proj_ecg():
    """Test computation of ECG SSP projectors"""
    raw = Raw(raw_fname).crop(0, 10, False)
    raw.preload_data()
    for average in [False, True]:
        # For speed, let's not filter here (must also not reject then)
        projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
                                         ch_name='MEG 1531', bads=['MEG 2443'],
                                         average=average, avg_ref=True,
                                         no_proj=True, l_freq=None,
                                         h_freq=None, reject=None,
                                         tmax=dur_use, qrs_threshold=0.5)
        assert_true(len(projs) == 7)
        # heart rate at least 0.5 Hz, but less than 3 Hz
        assert_true(events.shape[0] > 0.5 * dur_use and
                    events.shape[0] < 3 * dur_use)
        # XXX: better tests

        # without setting a bad channel, this should throw a warning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
                                             ch_name='MEG 1531', bads=[],
                                             average=average, avg_ref=True,
                                             no_proj=True, l_freq=None,
                                             h_freq=None, tmax=dur_use)
            assert_equal(len(w), 1)
        assert_equal(projs, None)
Example #4
def test_cov_estimation_on_raw():
    """Test estimation from raw (typically empty room)"""
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov_mne = read_cov(erm_cov_fname)

    cov = compute_raw_covariance(raw, tstep=None)
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e4)

    cov = compute_raw_covariance(raw)  # tstep=0.2 (default)
    assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 1e2)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_covariance(raw, picks=picks, tstep=None)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_snr(cov.data, cov_mne.data[picks][:, picks], 1e4)
    cov = compute_raw_covariance(raw, picks=picks)
    assert_snr(cov.data, cov_mne.data[picks][:, picks], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_covariance(raw_2)
    assert_true(any('Too few samples' in str(ww.message) for ww in w))
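In a typical (non-test) pipeline the empty-room covariance is computed once and written to disk for later use. A minimal sketch with placeholder file names:

# Sketch: noise covariance from an empty-room recording.
import mne

raw_erm = mne.io.read_raw_fif('empty_room_raw.fif', preload=True)  # placeholder path
noise_cov = mne.compute_raw_covariance(raw_erm)   # default tstep=0.2 windowing
noise_cov.save('empty_room-cov.fif')              # conventional *-cov.fif name
noise_cov = mne.read_cov('empty_room-cov.fif')    # reload when needed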
Example #5
def test_ica_reset():
    """Test ICA resetting"""
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]

    run_time_attrs = (
        '_pre_whitener',
        'unmixing_matrix_',
        'mixing_matrix_',
        'n_components_',
        'n_samples_',
        'pca_components_',
        'pca_explained_variance_',
        'pca_mean_'
    )
    with warnings.catch_warnings(record=True):
        ica = ICA(
            n_components=3, max_pca_components=3, n_pca_components=3,
            method='fastica', max_iter=1).fit(raw, picks=picks)

    assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
    ica._reset()
    assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
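The attributes checked above are the ones that ICA.fit populates, and they are what the usual artifact-removal workflow relies on. A minimal sketch of that workflow, assuming an ECG channel is present (the path is a placeholder):

# Sketch: fit ICA, flag ECG-related components, and remove them.
import mne
from mne.preprocessing import ICA

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)  # placeholder path
raw.filter(1., 40.)                      # ICA behaves better on high-passed data
ica = ICA(n_components=20, random_state=42)
ica.fit(raw)
ecg_inds, scores = ica.find_bads_ecg(raw)   # uses the ECG channel if available
ica.exclude = ecg_inds
raw_clean = ica.apply(raw.copy())           # reconstruct without the flagged components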
Example #6
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions
    """
    trans, rot, t = get_chpi_positions(pos_fname)
    with warnings.catch_warnings(record=True):
        raw = Raw(raw_fif_fname, allow_maxshield=True, preload=True)
    t -= raw.first_samp / raw.info['sfreq']
    trans_est, rot_est, t_est = _calculate_chpi_positions(raw, verbose='debug')
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est))

    # degenerate conditions
    raw_no_chpi = Raw(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1., copy=False)
    with catch_logging() as log_file:
        _calculate_chpi_positions(raw_bad)
    for line in log_file.getvalue().split('\n')[:-1]:
        assert_true('0/5 acceptable' in line)
Example #7
def test_clean_info_bads():
    """Test cleaning info['bads'] when bad_channels are excluded """

    raw_file = op.join(op.dirname(__file__), "io", "tests", "data", "test_raw.fif")
    raw = Raw(raw_file)

    # select eeg channels
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    # select 3 eeg channels as bads
    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
    eeg_bad_ch = [raw.info["ch_names"][k] for k in idx_eeg_bad_ch]

    # select meg channels
    picks_meg = pick_types(raw.info, meg=True, eeg=False)

    # select randomly 3 meg channels as bads
    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
    meg_bad_ch = [raw.info["ch_names"][k] for k in idx_meg_bad_ch]

    # simulate the bad channels
    raw.info["bads"] = eeg_bad_ch + meg_bad_ch

    # simulate the call to pick_info excluding the bad eeg channels
    info_eeg = pick_info(raw.info, picks_eeg)

    # simulate the call to pick_info excluding the bad meg channels
    info_meg = pick_info(raw.info, picks_meg)

    assert_equal(info_eeg["bads"], eeg_bad_ch)
    assert_equal(info_meg["bads"], meg_bad_ch)
Example #8
def test_hilbert():
    """Test computation of analytic signal using hilbert
    """
    raw = Raw(fif_fname, preload=True)
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]

    raw_filt = raw.copy()
    raw_filt.filter(10, 20)
    raw_filt_2 = raw_filt.copy()

    raw2 = raw.copy()
    raw3 = raw.copy()
    raw.apply_hilbert(picks)
    raw2.apply_hilbert(picks, envelope=True, n_jobs=2)

    # Test custom n_fft
    raw_filt.apply_hilbert(picks)
    raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
                    atol=1e-13, rtol=1e-2)
    assert_raises(ValueError, raw3.apply_hilbert, picks,
                  n_fft=raw3.n_times - 100)

    env = np.abs(raw._data[picks, :])
    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
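The pattern under test (band-pass filter, then apply_hilbert) is the standard way to obtain a band-limited amplitude envelope. A minimal sketch with a placeholder path:

# Sketch: 10-20 Hz amplitude envelope via the Hilbert transform.
import mne

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)  # placeholder path
raw.filter(10., 20.)              # isolate the band of interest first
raw.apply_hilbert(envelope=True)  # channels now hold the analytic amplitude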
Example #9
def _get_data():
    # Read raw data
    raw = Raw(raw_fname)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    # Set picks
    picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                           stim=False, exclude='bads')

    # Read several epochs
    event_id, tmin, tmax = 1, -0.2, 0.5
    events = mne.read_events(event_fname)[0:100]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12))

    # Create an epochs object with one epoch and one channel of artificial data
    event_id, tmin, tmax = 1, 0.0, 1.0
    epochs_sin = mne.Epochs(raw, events[0:5], event_id, tmin, tmax, proj=True,
                            picks=[0], baseline=(None, 0), preload=True,
                            reject=dict(grad=4000e-13))
    freq = 10
    epochs_sin._data = np.sin(2 * np.pi * freq
                              * epochs_sin.times)[None, None, :]
    return epochs, epochs_sin
Example #10
def test_set_eeg_reference():
    """Test rereference eeg data"""
    raw = Raw(fif_fname, preload=True)
    raw.info['projs'] = []

    # Test setting an average reference
    assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
    reref, ref_data = set_eeg_reference(raw)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_true(ref_data is None)

    # Test setting an average reference when one was already present
    with warnings.catch_warnings(record=True):  # weight tables
        reref, ref_data = set_eeg_reference(raw, copy=False)
    assert_true(ref_data is None)

    # Rereference raw data by creating a copy of original data
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test that data is modified in place when copy=False
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                        copy=False)
    assert_true(raw is reref)
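In recent MNE-Python versions the same operation is also available as a Raw method; a minimal sketch under that assumption (the path is a placeholder):

# Sketch: EEG re-referencing on a Raw object.
import mne

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)  # placeholder path
raw.set_eeg_reference('average', projection=True)      # average reference as an SSP projector
# or re-reference directly to an explicit channel pair:
# raw.set_eeg_reference(['EEG 001', 'EEG 002'])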
Example #11
def test_split_files():
    """Test writing and reading of split raw files
    """
    tempdir = _TempDir()
    raw_1 = Raw(fif_fname, preload=True)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # buffer size
    split_fname = op.join(tempdir, 'split_raw.fif')
    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')

    raw_2 = Raw(split_fname)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # buffer size
    data_1, times_1 = raw_1[:, :]
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)

    # test the case where the silly user specifies the split files
    fnames = [split_fname]
    fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_2 = Raw(fnames)
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)
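Splitting happens transparently in Raw.save once split_size is exceeded, and reading the first part pulls in the rest. A minimal sketch with placeholder names:

# Sketch: write a large recording as split FIF files and read it back.
import mne

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)        # placeholder path
raw.save('my_long_raw.fif', split_size='2GB', overwrite=True)
raw_back = mne.io.read_raw_fif('my_long_raw.fif')            # follow-on parts load automatically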
Example #12
def test_ica_rank_reduction():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)

        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # The interaction between ICA rejection and PCA components is
        # difficult to predict. rank_after often seems to be 1 higher than
        # n_pca_components.
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
Example #13
def test_preload_modify():
    """Test preloading and modifying data
    """
    tempdir = _TempDir()
    for preload in [False, True, 'memmap.dat']:
        raw = Raw(fif_fname, preload=preload)

        nsamp = raw.last_samp - raw.first_samp + 1
        picks = pick_types(raw.info, meg='grad', exclude='bads')

        data = rng.randn(len(picks), nsamp // 2)

        try:
            raw[picks, :nsamp // 2] = data
        except RuntimeError as err:
            if not preload:
                continue
            else:
                raise err

        tmp_fname = op.join(tempdir, 'raw.fif')
        raw.save(tmp_fname, overwrite=True)

        raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]

        assert_allclose(data, data_new)
Example #14
def test_subject_info():
    """Test reading subject information
    """
    tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 1, False)
    assert_true(raw.info['subject_info'] is None)
    # fake some subject data
    keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
            'hand']
    vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
    subject_info = dict()
    for key, val in zip(keys, vals):
        subject_info[key] = val
    raw.info['subject_info'] = subject_info
    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
    raw.save(out_fname, overwrite=True)
    raw_read = Raw(out_fname)
    for key in keys:
        assert_equal(subject_info[key], raw_read.info['subject_info'][key])
    raw_read.anonymize()
    assert_true(raw_read.info.get('subject_info') is None)
    out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
    raw_read.save(out_fname_anon, overwrite=True)
    raw_read = Raw(out_fname_anon)
    assert_true(raw_read.info.get('subject_info') is None)
Example #15
def test_add_channels():
    """Test raw splitting / re-appending channel types
    """
    raw = Raw(test_fif_fname).crop(0, 1).load_data()
    raw_nopre = Raw(test_fif_fname, preload=False)
    raw_eeg_meg = raw.pick_types(meg=True, eeg=True, copy=True)
    raw_eeg = raw.pick_types(meg=False, eeg=True, copy=True)
    raw_meg = raw.pick_types(meg=True, eeg=False, copy=True)
    raw_stim = raw.pick_types(meg=False, eeg=False, stim=True, copy=True)
    raw_new = raw_meg.add_channels([raw_eeg, raw_stim], copy=True)
    assert_true(
        all(ch in raw_new.ch_names
            for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
    )
    raw_new = raw_meg.add_channels([raw_eeg], copy=True)

    assert_true(all(ch in raw_new.ch_names for ch in raw.ch_names))
    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
    assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))

    # Now test errors
    raw_badsf = raw_eeg.copy()
    raw_badsf.info['sfreq'] = 3.1415927
    raw_eeg = raw_eeg.crop(.5)

    assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
    assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
    assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
    assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
    assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
Example #16
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions
    """
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    raw = Raw(chpi_fif_fname, allow_maxshield="yes", preload=True)
    t -= raw.first_samp / raw.info["sfreq"]
    quats = _calculate_chpi_positions(raw, verbose="debug")
    trans_est, rot_est, t_est = head_pos_to_trans_rot_t(quats)
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est), 0.003)

    # degenerate conditions
    raw_no_chpi = Raw(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info["dig"]:
        if d["kind"] == FIFF.FIFFV_POINT_HPI:
            d["coord_frame"] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info["dig"]:
        if d["kind"] == FIFF.FIFFV_POINT_HPI:
            d["r"] = np.ones(3)
    raw_bad.crop(0, 1.0, copy=False)
    with warnings.catch_warnings(record=True):  # bad pos
        with catch_logging() as log_file:
            _calculate_chpi_positions(raw_bad, verbose=True)
    # ignore HPI info header and [done] footer
    for line in log_file.getvalue().strip().split("\n")[4:-1]:
        assert_true("0/5 good" in line)

    # half the rate cuts off cHPI coils
    with warnings.catch_warnings(record=True):  # uint cast suggestion
        raw.resample(300.0, npad="auto")
    assert_raises_regex(RuntimeError, "above the", _calculate_chpi_positions, raw)
Example #17
def test_chpi_subtraction():
    """Test subtraction of cHPI signals"""
    raw = Raw(chpi_fif_fname, allow_maxshield="yes", preload=True)
    with catch_logging() as log:
        filter_chpi(raw, include_line=False, verbose=True)
    assert_true("5 cHPI" in log.getvalue())
    # MaxFilter doesn't do quite as well as our algorithm with the last bit
    raw.crop(0, 16, copy=False)
    # remove cHPI status chans
    raw_c = Raw(sss_hpisubt_fname).crop(0, 16, copy=False).load_data()
    raw_c.pick_types(meg=True, eeg=True, eog=True, ecg=True, stim=True, misc=True)
    assert_meg_snr(raw, raw_c, 143, 624)

    # Degenerate cases
    raw_nohpi = Raw(test_fif_fname, preload=True)
    assert_raises(RuntimeError, filter_chpi, raw_nohpi)

    # When MaxFilter downsamples, like::
    #     $ maxfilter -nosss -ds 2 -f test_move_anon_raw.fif \
    #           -o test_move_anon_ds2_raw.fif
    # it can strip out some values of info, which we emulate here:
    raw = Raw(chpi_fif_fname, allow_maxshield="yes")
    with warnings.catch_warnings(record=True):  # uint cast suggestion
        raw = raw.crop(0, 1).load_data().resample(600.0, npad="auto")
    raw.info["buffer_size_sec"] = np.float64(2.0)
    raw.info["lowpass"] = 200.0
    del raw.info["maxshield"]
    del raw.info["hpi_results"][0]["moments"]
    del raw.info["hpi_subsystem"]["event_channel"]
    with catch_logging() as log:
        filter_chpi(raw, verbose=True)
    assert_true("2 cHPI" in log.getvalue())
Example #18
def test_bads_reconstruction():
    """Test Maxwell filter reconstruction of bad channels"""
    raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., False)
    raw.info['bads'] = bads
    raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss, Raw(sss_bad_recon_fname), 300.)
Example #19
def test_cross_talk():
    """Test Maxwell filter cross-talk cancellation"""
    raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., False)
    raw.info['bads'] = bads
    sss_ctc = Raw(sss_ctc_fname)
    raw_sss = maxwell_filter(raw, cross_talk=ctc_fname,
                             origin=mf_head_origin, regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss, sss_ctc, 275.)
    py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
    assert_true(len(py_ctc) > 0)
    assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw)
    assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw_fname)
    mf_ctc = sss_ctc.info['proc_history'][0]['max_info']['sss_ctc']
    del mf_ctc['block_id']  # we don't write this
    assert_equal(object_diff(py_ctc, mf_ctc), '')
    raw_ctf = Raw(fname_ctf_raw)
    assert_raises(ValueError, maxwell_filter, raw_ctf)  # cannot fit headshape
    raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
    _assert_n_free(raw_sss, 68)
    raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
    _assert_n_free(raw_sss, 70)
    raw_missing = raw.crop(0, 0.1, copy=True).load_data().pick_channels(
        [raw.ch_names[pi] for pi in pick_types(raw.info, meg=True,
                                               exclude=())[3:]])
    with warnings.catch_warnings(record=True) as w:
        maxwell_filter(raw_missing, cross_talk=ctc_fname)
    assert_equal(len(w), 1)
    assert_true('Not all cross-talk channels in raw' in str(w[0].message))
    # MEG channels not in cross-talk
    assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
                  cross_talk=ctc_fname)
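In practice the cross-talk file is passed to maxwell_filter together with the site's fine-calibration file. A minimal sketch in which both file names are placeholders for the site-specific files:

# Sketch: Maxwell filtering with cross-talk cancellation and fine calibration.
import mne
from mne.preprocessing import maxwell_filter

raw = mne.io.read_raw_fif('my_raw.fif')                # placeholder path
raw_sss = maxwell_filter(raw,
                         cross_talk='ct_sparse.fif',   # placeholder cross-talk file
                         calibration='sss_cal.dat')    # placeholder fine-calibration file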
Example #20
def test_make_eeg_layout():
    """ Test creation of EEG layout """
    tmp_name = 'foo'
    lout_name = 'test_raw'
    lout_orig = read_layout(kind=lout_name, path=lout_path)
    info = Raw(fif_fname).info
    layout = make_eeg_layout(info)
    layout.save(op.join(tempdir, tmp_name + '.lout'))
    lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
    assert_array_equal(lout_new.kind, tmp_name)
    assert_allclose(layout.pos, lout_new.pos, atol=0.1)
    assert_array_equal(lout_orig.names, lout_new.names)

    # Test input validation
    assert_raises(ValueError, make_eeg_layout, info, radius=-0.1)
    assert_raises(ValueError, make_eeg_layout, info, radius=0.6)
    assert_raises(ValueError, make_eeg_layout, info, width=-0.1)
    assert_raises(ValueError, make_eeg_layout, info, width=1.1)
    assert_raises(ValueError, make_eeg_layout, info, height=-0.1)
    assert_raises(ValueError, make_eeg_layout, info, height=1.1)

    bad_info = info.copy()
    bad_info['dig'] = None
    assert_raises(RuntimeError, make_eeg_layout, bad_info)
    bad_info['dig'] = []
    assert_raises(RuntimeError, make_eeg_layout, bad_info)
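A layout built this way is usually saved and reused for topographic plots. A minimal sketch with placeholder names:

# Sketch: build, save, and inspect an EEG layout.
import mne
from mne.channels import make_eeg_layout

raw = mne.io.read_raw_fif('my_raw.fif')  # placeholder path; needs EEG channels + digitization
layout = make_eeg_layout(raw.info)
layout.save('my_eeg.lout')               # reusable later via mne.channels.read_layout
layout.plot()                            # quick visual check of the sensor boxes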
Example #21
def perform_detrending(fname_raw, save=True):

    import mne
    import numpy as np
    from mne.io import Raw
    from numpy import poly1d, polyfit

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:

        # read data in
        raw = Raw(fname, preload=True)

        # get magnetometer (and reference) channels
        picks = mne.pick_types(raw.info, meg='mag', ref_meg=True, eeg=False,
                               stim=False, eog=False, exclude='bads')
        xval = np.arange(raw._data.shape[1])

        # loop over all channels and subtract a fitted linear trend
        for ipick in picks:
            coeff = polyfit(xval, raw._data[ipick, :], deg=1)
            trend = poly1d(coeff)
            raw._data[ipick, :] -= trend(xval)

        # save detrended data next to the input file
        if save:
            fnout = fname[:fname.rfind('-raw.fif')] + ',dt-raw.fif'
            raw.save(fnout, overwrite=True)

    return raw
Example #22
def test_ch_loc():
    """Test raw kit loc
    """
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<')
    raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'))

    ch_py = raw_py._raw_extras[0]['sensor_locs'][:, :5]
    # ch locs stored as m, not mm
    ch_py[:, :3] *= 1e3
    ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
    assert_array_almost_equal(ch_py, ch_sns, 2)

    assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
                              raw_bin.info['dev_head_t']['trans'], 4)
    for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
        if bin_ch['ch_name'].startswith('MEG'):
            # the stored ch locs have more precision than the sns.txt
            assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)

    # test when more than one marker file provided
    mrks = [mrk_path, mrk2_path, mrk3_path]
    read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
    # this dataset does not have the equivalent set of points :(
    raw_bin.info['dig'] = raw_bin.info['dig'][:8]
    raw_py.info['dig'] = raw_py.info['dig'][:8]
    assert_dig_allclose(raw_py.info, raw_bin.info)
Example #23
def test_bads_reconstruction():
    """Test Maxwell filter reconstruction of bad channels"""
    with warnings.catch_warnings(record=True):  # maxshield
        raw = Raw(raw_fname, allow_maxshield=True).crop(0.0, 1.0, False)
    raw.info["bads"] = bads
    raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None, bad_condition="ignore")
    assert_meg_snr(raw_sss, Raw(sss_bad_recon_fname), 300.0)
Example #24
def test_set_channel_types():
    """Test set_channel_types
    """
    raw = Raw(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = Raw(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'eog', 'EEG 059': 'ecg', 'EOG 061': 'seeg'}
    raw2.set_channel_types(mapping)
    info = raw2.info
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
Example #25
def test_io_complex():
    """Test IO with complex data types
    """
    rng = np.random.RandomState(0)
    tempdir = _TempDir()
    dtypes = [np.complex64, np.complex128]

    raw = Raw(fif_fname, preload=True)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])

    data_orig, _ = raw[picks, start:stop]

    for di, dtype in enumerate(dtypes):
        imag_rand = np.array(1j * rng.randn(data_orig.shape[0], data_orig.shape[1]), dtype)

        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        # this should throw an error because it's complex
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            raw_cp.save(op.join(tempdir, "raw.fif"), picks, tmin=0, tmax=5, overwrite=True)
            # warning gets thrown on every instance b/c simplefilter('always')
            assert_equal(len(w), 1)

        raw2 = Raw(op.join(tempdir, "raw.fif"))
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
        # with preloading
        raw2 = Raw(op.join(tempdir, "raw.fif"), preload=True)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
Example #26
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions
    """
    trans, rot, t = get_chpi_positions(pos_fname)
    with warnings.catch_warnings(record=True):
        raw = Raw(raw_fif_fname, allow_maxshield=True, preload=True)
    t -= raw.first_samp / raw.info['sfreq']
    trans_est, rot_est, t_est = _calculate_chpi_positions(raw, verbose='debug')
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est))

    # degenerate conditions
    raw_no_chpi = Raw(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1., copy=False)
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    set_log_file(log_file, overwrite=True)
    try:
        _calculate_chpi_positions(raw_bad)
    finally:
        set_log_file()
    with open(log_file, 'r') as fid:
        for line in fid:
            assert_true('0/5 acceptable' in line)
Example #27
def manual_filter():
    fname = 'ec_rest_before_tsss_mc_rsl.fif'
    raw = Raw(fname, preload=False)
    raw.preload_data() #  data becomes numpy.float64
    _mmap_raw(raw)

    copy = False  # filter in-place
    zero_phase = True
    n_jobs = 1
    picks = pick_types(raw.info, exclude=[], meg=True)

    x = raw._data  # pointer?

    Fs = 1000.
    Fp = 20.
    Fstop = 21.
    filter_length = '10s'

    print('x.shape before prepping:', x.shape)
    h_fft, n_h, n_edge, orig_shape = _do_prep(Fs, Fp, Fstop, x, copy, picks,
                                              filter_length, zero_phase)
    print('x.shape after prepping:', x.shape)

    print('Before filtering:')
    print(x[0][:10])
    for p in picks:
        mangle_x(x[p])
        # _1d_overlap_filter(x[p], h_fft, n_h, n_edge, zero_phase,
        #                           dict(use_cuda=False))
    #x.shape = orig_shape
    print('After filtering:')
    print(x[0][:10])

    print('Data type:', raw._data[0][:5])
    print(type(raw._data))
Example #28
def test_bads_reconstruction():
    """Test Maxwell filter reconstruction of bad channels"""
    with warnings.catch_warnings(record=True):  # maxshield
        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
    raw.info['bads'] = bads
    raw_sss = maxwell_filter(raw)
    _assert_snr(raw_sss, Raw(sss_bad_recon_fname), 300.)
Example #29
def test_find_ecg():
    """Test find ECG peaks"""
    raw = Raw(raw_fname)

    # once with mag-trick
    # once with characteristic channel
    for ch_name in ['MEG 1531', None]:
        events, ch_ECG, average_pulse, ecg = find_ecg_events(
            raw, event_id=999, ch_name=ch_name, return_ecg=True)
        assert_equal(len(raw.times), len(ecg))
        n_events = len(events)
        _, times = raw[0, :]
        assert_true(55 < average_pulse < 60)

    picks = pick_types(
        raw.info, meg='grad', eeg=False, stim=False,
        eog=False, ecg=True, emg=False, ref_meg=False,
        exclude='bads')

    raw.load_data()
    ecg_epochs = create_ecg_epochs(raw, picks=picks, keep_ecg=True)
    assert_equal(len(ecg_epochs.events), n_events)
    assert_true('ECG-SYN' not in raw.ch_names)
    assert_true('ECG-SYN' in ecg_epochs.ch_names)

    picks = pick_types(
        ecg_epochs.info, meg=False, eeg=False, stim=False,
        eog=False, ecg=True, emg=False, ref_meg=False,
        exclude='bads')
    assert_true(len(picks) == 1)
Example #30
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
                / linalg.norm(cov.data, ord='fro') < 1e-4)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
Example #31
def test_simulate_raw_sphere():
    """Test simulation of raw data with sphere model"""
    seed = 42
    raw, src, stc, trans, sphere = _get_data()
    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)

    # head pos
    head_pos_sim = dict()
    # these will be at 1., 2., ... sec
    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]

    for time_key, shift in enumerate(shifts):
        # Create 4x4 matrix transform and normalize
        temp_trans = deepcopy(raw.info['dev_head_t'])
        temp_trans['trans'][:3, 3] += shift
        head_pos_sim[time_key + 1.] = temp_trans['trans']

    #
    # Test raw simulation with basic parameters
    #
    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
                           head_pos=head_pos_sim,
                           blink=True, ecg=True, random_state=seed)
    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
                             cov_fname, head_pos=head_pos_sim,
                             blink=True, ecg=True, random_state=seed)
    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
    # Test IO on processed data
    tempdir = _TempDir()
    test_outname = op.join(tempdir, 'sim_test_raw.fif')
    raw_sim.save(test_outname)

    raw_sim_loaded = Raw(test_outname, preload=True, proj=False)
    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
    del raw_sim, raw_sim_2
    # with no cov (no noise) but with artifacts, most time periods should match
    # but the EOG/ECG channels should not
    for ecg, eog in ((True, False), (False, True), (True, True)):
        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=eog, ecg=ecg, random_state=seed)
        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=False, ecg=False, random_state=seed)
        picks = np.arange(len(raw.ch_names))
        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
        these_picks = np.setdiff1d(picks, diff_picks)
        close = isclose(raw_sim_3[these_picks][0],
                        raw_sim_4[these_picks][0], atol=1e-20)
        assert_true(np.mean(close) > 0.7)
        far = ~isclose(raw_sim_3[diff_picks][0],
                       raw_sim_4[diff_picks][0], atol=1e-20)
        assert_true(np.mean(far) > 0.99)
    del raw_sim_3, raw_sim_4

    # make sure it works with EEG-only and MEG-only
    raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True),
                                stc, trans, src, sphere, cov=None,
                                ecg=True, blink=True, random_state=seed)
    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg

    # check that different interpolations are similar given small movements
    raw_sim_cos = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim,
                               random_state=seed)
    raw_sim_lin = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim, interp='linear',
                               random_state=seed)
    assert_allclose(raw_sim_cos[:][0], raw_sim_lin[:][0],
                    rtol=1e-5, atol=1e-20)
    del raw_sim_cos, raw_sim_lin

    # Make impossible transform (translate up into helmet) and ensure failure
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  ecg=False, blink=False, head_pos=head_pos_sim_err)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
                  bem_fname, ecg=False, blink=False,
                  head_pos=head_pos_sim_err)
    # other degenerate conditions
    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
                  trans, src, sphere)
    stc_bad = stc.copy()
    stc_bad.tstep += 0.1
    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  chpi=True)  # no cHPI info
    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
                  interp='foo')
    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=1.)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=pos_fname)  # ends up with t>t_end
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=head_pos_sim_err)
    raw_bad = raw.copy()
    raw_bad.info['dig'] = None
    assert_raises(RuntimeError, simulate_raw, raw_bad, stc, trans, src, sphere,
                  blink=True)
Example #32
import numpy as np

import mne
from mne.io import Raw
from mne.preprocessing import ICA, create_ecg_epochs
from mne.datasets import sample

print(__doc__)

###############################################################################
# Setup paths and prepare epochs data

data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

raw = Raw(raw_fname, preload=True)
raw.filter(1, 30, method='iir')
picks = mne.pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       eog=True,
                       ecg=True,
                       stim=False,
                       exclude='bads')

# longer + more epochs for more artifact exposure
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
reject = dict(eog=250e-6)
tmin, tmax = -0.5, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    reject=reject, preload=True)
Example #33
def test_proj():
    """Test SSP proj operations
    """
    tempdir = _TempDir()
    for proj in [True, False]:
        raw = Raw(fif_fname, preload=False, proj=proj)
        assert_true(all(p['active'] == proj for p in raw.info['projs']))

        data, times = raw[0:2, :]
        data1, times1 = raw[0:2]
        assert_array_equal(data, data1)
        assert_array_equal(times, times1)

        # test adding / deleting proj
        if proj:
            assert_raises(ValueError, raw.add_proj, [],
                          {'remove_existing': True})
            assert_raises(ValueError, raw.del_proj, 0)
        else:
            projs = deepcopy(raw.info['projs'])
            n_proj = len(raw.info['projs'])
            raw.del_proj(0)
            assert_equal(len(raw.info['projs']), n_proj - 1)
            raw.add_proj(projs, remove_existing=False)
            assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
            raw.add_proj(projs, remove_existing=True)
            assert_equal(len(raw.info['projs']), n_proj)

    # test apply_proj() with and without preload
    for preload in [True, False]:
        raw = Raw(fif_fname, preload=preload, proj=False)
        data, times = raw[:, 0:2]
        raw.apply_proj()
        data_proj_1 = np.dot(raw._projector, data)

        # load the file again without proj
        raw = Raw(fif_fname, preload=preload, proj=False)

        # write the file with proj. activated, make sure proj has been applied
        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
        raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))

        # read orig file with proj. active
        raw2 = Raw(fif_fname, preload=preload, proj=True)
        data_proj_2, _ = raw2[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_true(all(p['active'] for p in raw2.info['projs']))

        # test that apply_proj works
        raw.apply_proj()
        data_proj_2, _ = raw[:, 0:2]
        assert_allclose(data_proj_1, data_proj_2)
        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))

    tempdir = _TempDir()
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
    raw.pick_types(meg=False, eeg=True)
    raw.info['projs'] = [raw.info['projs'][-1]]
    raw._data.fill(0)
    raw._data[-1] = 1.
    raw.save(out_fname)
    raw = read_raw_fif(out_fname, proj=True, preload=False)
    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
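The user-facing pattern being exercised is that SSP vectors travel with the file in an inactive state and are applied explicitly. A minimal sketch (placeholder path):

# Sketch: inspect and apply the SSP projectors stored with a recording.
import mne

raw = mne.io.read_raw_fif('my_raw.fif', preload=True)  # placeholder path
print(raw.info['projs'])   # projectors read from the file, not yet applied
raw.apply_proj()           # apply them to the data (not reversible)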
Example #34
def test_movement_compensation():
    """Test movement compensation"""
    lims = (0, 8)
    with warnings.catch_warnings(record=True):  # maxshield
        raw = Raw(raw_fname, allow_maxshield=True, preload=True).crop(*lims)
    head_pos = read_head_pos(pos_fname)

    #
    # Movement compensation, no regularization, no tSSS
    #
    raw_sss = maxwell_filter(raw,
                             head_pos=head_pos,
                             origin=mf_head_origin,
                             regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss,
                   Raw(sss_movecomp_fname).crop(*lims),
                   4.6,
                   12.4,
                   chpi_med_tol=58)

    #
    # Movement compensation,    regularization, no tSSS
    #
    raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin)
    assert_meg_snr(raw_sss,
                   Raw(sss_movecomp_reg_in_fname).crop(*lims),
                   0.7,
                   1.9,
                   chpi_med_tol=121)

    #
    # Movement compensation,    regularization,    tSSS at the end
    #
    raw_nohpi = filter_chpi(raw.copy())
    with warnings.catch_warnings(record=True) as w:  # untested feature
        raw_sss_mv = maxwell_filter(raw_nohpi,
                                    head_pos=head_pos,
                                    st_duration=4.,
                                    origin=mf_head_origin,
                                    st_fixed=False)
    assert_equal(len(w), 1)
    assert_true('is untested' in str(w[0].message))
    # Neither match is particularly good because our algorithm actually differs
    assert_meg_snr(raw_sss_mv,
                   Raw(sss_movecomp_reg_in_st4s_fname).crop(*lims), 0.6, 1.3)
    tSSS_fname = op.join(sss_path, 'test_move_anon_st4s_raw_sss.fif')
    assert_meg_snr(raw_sss_mv,
                   Raw(tSSS_fname).crop(*lims),
                   0.6,
                   1.0,
                   chpi_med_tol=None)
    assert_meg_snr(Raw(sss_movecomp_reg_in_st4s_fname),
                   Raw(tSSS_fname),
                   0.8,
                   1.0,
                   chpi_med_tol=None)

    #
    # Movement compensation,    regularization,    tSSS at the beginning
    #
    raw_sss_mc = maxwell_filter(raw_nohpi,
                                head_pos=head_pos,
                                st_duration=4.,
                                origin=mf_head_origin)
    assert_meg_snr(raw_sss_mc,
                   Raw(tSSS_fname).crop(*lims),
                   0.6,
                   1.0,
                   chpi_med_tol=None)
    assert_meg_snr(raw_sss_mc, raw_sss_mv, 0.6, 1.4)

    # some degenerate cases
    with warnings.catch_warnings(record=True):  # maxshield
        raw_erm = Raw(erm_fname, allow_maxshield=True)
    assert_raises(ValueError,
                  maxwell_filter,
                  raw_erm,
                  coord_frame='meg',
                  head_pos=head_pos)  # can't do ERM file
    head_pos_bad = head_pos[:, :9]
    assert_raises(ValueError, maxwell_filter, raw,
                  head_pos=head_pos_bad)  # bad shape
    head_pos_bad = 'foo'
    assert_raises(TypeError, maxwell_filter, raw,
                  head_pos=head_pos_bad)  # bad type
    head_pos_bad = head_pos[::-1]
    assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
    head_pos_bad = head_pos.copy()
    head_pos_bad[0, 0] = 1.  # bad time given the first_samp...
    assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
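Movement compensation only needs the head-position array recovered from the cHPI coils (or from MaxFilter's -hp output). A minimal sketch with placeholder file names:

# Sketch: movement-compensated Maxwell filtering.
import mne
from mne.preprocessing import maxwell_filter

raw = mne.io.read_raw_fif('my_maxshield_raw.fif', allow_maxshield='yes',
                          preload=True)            # placeholder path
head_pos = mne.chpi.read_head_pos('my_raw.pos')    # placeholder head-position file
raw_sss = maxwell_filter(raw, head_pos=head_pos)   # compensate for head movement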
Example #35
def test_has_eeg_average_ref_proj():
    """Test checking whether an EEG average reference exists"""
    assert_true(not _has_eeg_average_ref_proj([]))

    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
    assert_true(_has_eeg_average_ref_proj(raw.info['projs']))
Example #36
def test_compute_proj_raw():
    """Test SSP computation on raw"""
    tempdir = _TempDir()
    # Test that the raw projectors work
    raw_time = 2.5  # Do shorter amount for speed
    raw = Raw(raw_fname, preload=True).crop(0, raw_time, False)
    for ii in (0.25, 0.5, 1, 2):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs = compute_proj_raw(raw, duration=ii - 0.1, stop=raw_time,
                                     n_grad=1, n_mag=1, n_eeg=0)
            assert_true(len(w) == 1)

        # test that you can compute the projection matrix
        projs = activate_proj(projs)
        proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])

        assert_true(nproj == 2)
        assert_true(U.shape[1] == 2)

        # test that you can save them
        raw.info['projs'] += projs
        raw.save(op.join(tempdir, 'foo_%d_raw.fif' % ii), overwrite=True)

    # Test that purely continuous (no duration) raw projection works
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw, duration=None, stop=raw_time,
                                 n_grad=1, n_mag=1, n_eeg=0)
        assert_equal(len(w), 1)

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    raw.info['projs'] += projs
    raw.save(op.join(tempdir, 'foo_rawproj_continuous_raw.fif'))

    # test resampled-data projector, upsampling instead of downsampling
    # here to save an extra filtering (raw would have to be LP'ed to be equiv)
    raw_resamp = cp.deepcopy(raw)
    raw_resamp.resample(raw.info['sfreq'] * 2, n_jobs=2)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw_resamp, duration=None, stop=raw_time,
                                 n_grad=1, n_mag=1, n_eeg=0)
    projs = activate_proj(projs)
    proj_new, _, _ = make_projector(projs, raw.ch_names, bads=[])
    assert_array_almost_equal(proj_new, proj, 4)

    # test with bads
    raw.load_bad_channels(bads_fname)  # adds 2 bad mag channels
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw, n_grad=0, n_mag=0, n_eeg=1)

    # test that bad channels can be excluded
    proj, nproj, U = make_projector(projs, raw.ch_names,
                                    bads=raw.ch_names)
    assert_array_almost_equal(proj, np.eye(len(raw.ch_names)))
Example #37
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name

event_id, tmin, tmax = 1, -0.2, 0.5

# Using the same inverse operator when inspecting single trials Vs. evoked
snr = 3.0  # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr**2

method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load data
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw = Raw(fname_raw)
events = mne.read_events(fname_event)

# Set up pick list
include = []

# Add a bad channel
raw.info['bads'] += ['EEG 053']  # bads + 1 more

# pick MEG channels
picks = mne.pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       stim=False,
                       eog=True,
                       include=include,
                       exclude='bads')
Example #38
def test_shielding_factor():
    """Test Maxwell filter shielding factor using empty room"""
    with warnings.catch_warnings(record=True):  # maxshield
        raw_erm = Raw(erm_fname, allow_maxshield=True, preload=True)
    picks = pick_types(raw_erm.info, meg='mag')
    erm_power = raw_erm[picks][0].ravel()
    erm_power = np.sqrt(np.sum(erm_power * erm_power))

    # Vanilla SSS (second value would be for meg=True instead of meg='mag')
    _assert_shielding(Raw(sss_erm_std_fname), erm_power, 10)  # 1.5)
    raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None)
    _assert_shielding(raw_sss, erm_power, 12)  # 1.5)

    # Fine cal
    _assert_shielding(Raw(sss_erm_fine_cal_fname), erm_power, 12)  # 2.0)
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             origin=mf_meg_origin,
                             calibration=fine_cal_fname)
    _assert_shielding(raw_sss, erm_power, 12)  # 2.0)

    # Crosstalk
    _assert_shielding(Raw(sss_erm_ctc_fname), erm_power, 12)  # 2.1)
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             origin=mf_meg_origin,
                             cross_talk=ctc_fname)
    _assert_shielding(raw_sss, erm_power, 12)  # 2.1)

    # Fine cal + Crosstalk
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             calibration=fine_cal_fname,
                             origin=mf_meg_origin,
                             cross_talk=ctc_fname)
    _assert_shielding(raw_sss, erm_power, 13)  # 2.2)

    # tSSS
    _assert_shielding(Raw(sss_erm_st_fname), erm_power, 37)  # 5.8)
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             origin=mf_meg_origin,
                             st_duration=1.)
    _assert_shielding(raw_sss, erm_power, 37)  # 5.8)

    # Crosstalk + tSSS
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             cross_talk=ctc_fname,
                             origin=mf_meg_origin,
                             st_duration=1.)
    _assert_shielding(raw_sss, erm_power, 38)  # 5.91)

    # Fine cal + tSSS
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             calibration=fine_cal_fname,
                             origin=mf_meg_origin,
                             st_duration=1.)
    _assert_shielding(raw_sss, erm_power, 38)  # 5.98)

    # Fine cal + Crosstalk + tSSS
    _assert_shielding(Raw(sss_erm_st1FineCalCrossTalk_fname), erm_power,
                      39)  # 6.07)
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             regularize=None,
                             calibration=fine_cal_fname,
                             origin=mf_meg_origin,
                             cross_talk=ctc_fname,
                             st_duration=1.)
    _assert_shielding(raw_sss, erm_power, 39)  # 6.05)

    # Fine cal + Crosstalk + tSSS + Reg-in
    _assert_shielding(Raw(sss_erm_st1FineCalCrossTalkRegIn_fname), erm_power,
                      57)  # 6.97)
    raw_sss = maxwell_filter(raw_erm,
                             calibration=fine_cal_fname,
                             cross_talk=ctc_fname,
                             st_duration=1.,
                             origin=mf_meg_origin,
                             coord_frame='meg',
                             regularize='in')
    _assert_shielding(raw_sss, erm_power, 53)  # 6.64)
    raw_sss = maxwell_filter(raw_erm,
                             calibration=fine_cal_fname,
                             cross_talk=ctc_fname,
                             st_duration=1.,
                             coord_frame='meg',
                             regularize='in')
    _assert_shielding(raw_sss, erm_power, 58)  # 7.0)
    raw_sss = maxwell_filter(raw_erm,
                             calibration=fine_cal_fname_3d,
                             cross_talk=ctc_fname,
                             st_duration=1.,
                             coord_frame='meg',
                             regularize='in')

    # Our 3D cal has worse defaults for this ERM than the 1D file
    _assert_shielding(raw_sss, erm_power, 54)
    # Show it by rewriting the 3D as 1D and testing it
    temp_dir = _TempDir()
    temp_fname = op.join(temp_dir, 'test_cal.dat')
    with open(fine_cal_fname_3d, 'r') as fid:
        with open(temp_fname, 'w') as fid_out:
            for line in fid:
                fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n')
    raw_sss = maxwell_filter(raw_erm,
                             calibration=temp_fname,
                             cross_talk=ctc_fname,
                             st_duration=1.,
                             coord_frame='meg',
                             regularize='in')
    # Our 3D cal has worse defaults for this ERM than the 1D file
    _assert_shielding(raw_sss, erm_power, 44)
Example #39
def test_basic():
    """Test Maxwell filter basic version"""
    # Load testing data (raw, SSS std origin, SSS non-standard origin)
    with warnings.catch_warnings(record=True):  # maxshield
        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
        raw_err = Raw(raw_fname, proj=True, allow_maxshield=True)
        raw_erm = Raw(erm_fname, allow_maxshield=True)
    assert_raises(RuntimeError, maxwell_filter, raw_err)
    assert_raises(TypeError, maxwell_filter, 1.)  # not a raw
    assert_raises(ValueError, maxwell_filter, raw, int_order=20)  # too many

    n_int_bases = int_order**2 + 2 * int_order
    n_ext_bases = ext_order**2 + 2 * ext_order
    nbases = n_int_bases + n_ext_bases

    # Check number of bases computed correctly
    assert_equal(_get_n_moments([int_order, ext_order]).sum(), nbases)

    # Test SSS computation at the standard head origin
    assert_equal(len(raw.info['projs']), 12)  # 11 MEG projs + 1 AVG EEG
    raw_sss = maxwell_filter(raw,
                             origin=mf_head_origin,
                             regularize=None,
                             bad_condition='ignore')
    assert_equal(len(raw_sss.info['projs']), 1)  # avg EEG
    assert_equal(raw_sss.info['projs'][0]['desc'], 'Average EEG reference')
    assert_meg_snr(raw_sss, Raw(sss_std_fname), 200., 1000.)
    py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
    assert_equal(len(py_cal), 0)
    py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
    assert_equal(len(py_ctc), 0)
    py_st = raw_sss.info['proc_history'][0]['max_info']['max_st']
    assert_equal(len(py_st), 0)
    assert_raises(RuntimeError, maxwell_filter, raw_sss)

    # Test SSS computation at non-standard head origin
    raw_sss = maxwell_filter(raw,
                             origin=[0., 0.02, 0.02],
                             regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss, Raw(sss_nonstd_fname), 250., 700.)

    # Test SSS computation at device origin
    sss_erm_std = Raw(sss_erm_std_fname)
    raw_sss = maxwell_filter(raw_erm,
                             coord_frame='meg',
                             origin=mf_meg_origin,
                             regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss, sss_erm_std, 100., 900.)
    for key in ('job', 'frame'):
        vals = [
            x.info['proc_history'][0]['max_info']['sss_info'][key]
            for x in [raw_sss, sss_erm_std]
        ]
        assert_equal(vals[0], vals[1])

    # Check against SSS functions from proc_history
    sss_info = raw_sss.info['proc_history'][0]['max_info']
    assert_equal(_get_n_moments(int_order),
                 proc_history._get_sss_rank(sss_info))

    # Degenerate cases
    raw_bad = raw.copy()
    raw_bad.comp = True
    assert_raises(RuntimeError, maxwell_filter, raw_bad)
    del raw_bad
    assert_raises(ValueError, maxwell_filter, raw, coord_frame='foo')
    assert_raises(ValueError, maxwell_filter, raw, origin='foo')
    assert_raises(ValueError, maxwell_filter, raw, origin=[0] * 4)
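
For reference, the basis-count arithmetic used in this test follows from the spherical-harmonic expansion: each internal or external order l contributes 2l + 1 moments, so an expansion up to order L has sum over l = 1..L of (2l + 1) = L**2 + 2*L terms. A quick standalone check (the usual defaults of int_order=8 and ext_order=3 are assumed):

def n_moments(order):
    # sum of (2 * l + 1) for l = 1..order equals order ** 2 + 2 * order
    return sum(2 * l + 1 for l in range(1, order + 1))

assert n_moments(8) == 8 ** 2 + 2 * 8   # 80 internal moments (int_order=8)
assert n_moments(3) == 3 ** 2 + 2 * 3   # 15 external moments (ext_order=3)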
Example #40
def test_other_systems():
    """Test Maxwell filtering on KIT, BTI, and CTF files
    """
    io_dir = op.join(op.dirname(__file__), '..', '..', 'io')

    # KIT
    kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    with warnings.catch_warnings(record=True):  # head fit
        assert_raises(RuntimeError, maxwell_filter, raw_kit)
    raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04), ignore_ref=True)
    _assert_n_free(raw_sss, 65, 65)
    # XXX this KIT origin fit is terrible! Eventually we should get a
    # corrected HSP file with proper coverage
    with warnings.catch_warnings(record=True):
        with catch_logging() as log_file:
            assert_raises(RuntimeError,
                          maxwell_filter,
                          raw_kit,
                          ignore_ref=True,
                          regularize=None)  # bad condition
            raw_sss = maxwell_filter(raw_kit,
                                     origin='auto',
                                     ignore_ref=True,
                                     bad_condition='warning',
                                     verbose='warning')
    log_file = log_file.getvalue()
    assert_true('badly conditioned' in log_file)
    assert_true('more than 20 mm from' in log_file)
    # fits can differ slightly based on scipy version, so be lenient here
    _assert_n_free(raw_sss, 28, 34)  # bad origin == brutal reg
    # Let's set the origin
    with warnings.catch_warnings(record=True):
        with catch_logging() as log_file:
            raw_sss = maxwell_filter(raw_kit,
                                     origin=(0., 0., 0.04),
                                     ignore_ref=True,
                                     bad_condition='warning',
                                     regularize=None,
                                     verbose='warning')
    log_file = log_file.getvalue()
    assert_true('badly conditioned' in log_file)
    _assert_n_free(raw_sss, 80)
    # Now with reg
    with warnings.catch_warnings(record=True):
        with catch_logging() as log_file:
            raw_sss = maxwell_filter(raw_kit,
                                     origin=(0., 0., 0.04),
                                     ignore_ref=True,
                                     verbose=True)
    log_file = log_file.getvalue()
    assert_true('badly conditioned' not in log_file)
    _assert_n_free(raw_sss, 65)

    # BTi
    bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    with warnings.catch_warnings(record=True):  # weight table
        raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    raw_sss = maxwell_filter(raw_bti)
    _assert_n_free(raw_sss, 70)

    # CTF
    fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
    raw_ctf = Raw(fname_ctf_raw, compensation=2)
    assert_raises(RuntimeError, maxwell_filter, raw_ctf)  # compensated
    raw_ctf = Raw(fname_ctf_raw)
    assert_raises(ValueError, maxwell_filter, raw_ctf)  # cannot fit headshape
    raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
    _assert_n_free(raw_sss, 68)
    raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
    _assert_n_free(raw_sss, 70)
Example #41
def test_ica_full_data_recovery():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, copy=False)
    raw.load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[:10]
    with warnings.catch_warnings(record=True):  # bad proj
        epochs = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True)
    evoked = epochs.average()
    n_channels = 5
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    for method in ['fastica']:
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method,
                      max_iter=1)
            with warnings.catch_warnings(record=True):
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw.copy(), exclude=[])
            if ok:
                assert_allclose(data[:n_channels],
                                raw2._data[:n_channels],
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert_true(np.max(diff) > 1e-14)

            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components)
            with warnings.catch_warnings(record=True):
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs.copy(), exclude=[])
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)

            evoked2 = ica.apply(evoked.copy(), exclude=[])
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert_true(np.max(diff) > 1e-14)
    assert_raises(ValueError, ICA, method='pizza-decomposision')
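
The recovery assertions above boil down to a linear-algebra fact: projecting data onto all of its PCA/ICA components and back is lossless, while keeping only a subset is not. A small NumPy sketch of that idea, independent of MNE's ICA class:

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(5, 1000)                      # 5 "channels" of noise
data -= data.mean(axis=1, keepdims=True)
u, s, vt = np.linalg.svd(data, full_matrices=False)   # orthonormal basis
full = np.dot(u, np.dot(u.T, data))            # all 5 components: exact
partial = np.dot(u[:, :2], np.dot(u[:, :2].T, data))  # 2 of 5: lossy
assert np.allclose(data, full, atol=1e-10)
assert np.abs(data - partial).max() > 1e-14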
Example #42
from mne.datasets import sample
from mne.io import Raw
from mne.minimum_norm import apply_inverse_raw, read_inverse_operator

data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name

snr = 1.0  # use smaller SNR for raw data
lambda2 = 1.0 / snr**2
method = "sLORETA"  # use sLORETA method (could also be MNE or dSPM)

# Load data
raw = Raw(fname_raw)
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)

start, stop = raw.time_as_index([0, 15])  # read the first 15s of data

# Compute inverse solution
stc = apply_inverse_raw(raw,
                        inverse_operator,
                        lambda2,
                        method,
                        label,
                        start,
                        stop,
                        pick_ori=None)
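
A plausible continuation of this example, assuming the standard SourceEstimate API (the output filename and the plot are illustrative additions, not part of the original script):

import matplotlib.pyplot as plt

# Save the source estimate (writes <name>-lh.stc / <name>-rh.stc)
stc.save('sample_audvis_raw_sLORETA')

# Plot the mean sLORETA time course over the label's sources
plt.plot(stc.times, stc.data.mean(axis=0))
plt.xlabel('Time (s)')
plt.ylabel('sLORETA value (label mean)')
plt.show()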
Example #43
def test_ica_core():
    """Test ICA on raw and epochs"""
    raw = Raw(raw_fname).crop(1.5, stop, copy=False)
    raw.load_data()
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    # XXX. The None cases helped reveal bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = ['fastica']
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # test init catchers
    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method,
                  max_iter=1)
        assert_raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        assert_raises(RuntimeError, ica.get_sources, raw)
        assert_raises(RuntimeError, ica.get_sources, epochs)

        # test decomposition
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
            repr(ica)  # to test repr
        assert_true('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        sources = ica.get_sources(raw)[:, :][0]
        assert_true(sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        assert_raises(ValueError, ica.apply, raw3, include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0)
        with warnings.catch_warnings(record=True):
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert_true(sources.shape[1] == ica.n_components_)

        assert_raises(ValueError,
                      ica.score_sources,
                      epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        assert_raises(ValueError, ica.apply, epochs3, include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica._pre_whitener.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica._pre_whitener)

    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    assert_raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    assert_raises(ValueError, ica.get_sources, offender)
    assert_raises(ValueError, ica.fit, offender)
    assert_raises(ValueError, ica.apply, offender)
Example #44
File: filter_ICA.py  Project: ahoejlund/CAA
from my_settings import *

subject = sys.argv[1]

import matplotlib
matplotlib.use('Agg')

# SETTINGS
n_jobs = 1
l_freq, h_freq = 1, 98  # High and low frequency setting for the band pass
n_freq = 50  # notch filter frequency
decim = 7  # decim value


raw = Raw(maxfiltered_folder + "%s_data_mc_raw_tsss.fif" % subject,
          preload=True)
raw.del_proj(0)
raw.drop_channels(raw.info["bads"])
raw.add_eeg_average_proj()

raw.notch_filter(n_freq, n_jobs=n_jobs)
raw.filter(l_freq, h_freq, n_jobs=n_jobs)

raw.save(save_folder + "%s_filtered_data_mc_raw_tsss.fif" % subject,
         overwrite=True)

# ICA Part
ica = ICA(n_components=0.95, method='fastica', max_iter=256)

picks = mne.pick_types(raw.info, meg=True, eeg=True,
                       stim=False, exclude='bads')
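
The snippet ends right after `picks` is defined; a hedged sketch of how it presumably continues, assuming the usual MNE ICA workflow (the output filename pattern is a guess):

# Fit the ICA decomposition on the filtered data (decim speeds this up)
ica.fit(raw, picks=picks, decim=decim)

# Store the solution for later artifact detection/rejection (filename assumed)
ica.save(save_folder + "%s-ica.fif" % subject)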
Example #45
def test_render_report():
    """Test rendering -*.fif files for mne report.
    """
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = Raw(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    if sys.version.startswith('3.5'):  # XXX Some strange MPL/3.5 error...
        raise SkipTest('Python 3.5 and mpl have unresolved issues')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'),
                open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
Example #46
def test_ica_additional():
    """Test additional ICA functionality"""
    tempdir = _TempDir()
    stop2 = 500
    raw = Raw(raw_fname).crop(1.5, stop, copy=False)
    raw.load_data()
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None,
                  random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info,
                        meg=True,
                        stim=False,
                        ecg=False,
                        eog=True,
                        exclude='bads')
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(
                ica.exclude ==
                [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.filter(4, 20)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.notch_filter([10])
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', '_pre_whitener'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      epochs.average(),
                      method='ctps')
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = Raw(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)
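
The float/int handling exercised at the end deserves a worked example: with five components each explaining 20% of the variance, a float threshold keeps the components whose cumulative explained variance does not exceed it, while an integer is taken as an explicit count. The helper below is hypothetical (it is not MNE's private `_check_n_pca_components`), but it reproduces the expected values in the test:

import numpy as np

explained = np.array([0.2] * 5)     # stands in for pca_explained_variance_
cum = np.cumsum(explained)          # [0.2, 0.4, 0.6, 0.8, 1.0]

def check_n_pca_components_sketch(n):
    if isinstance(n, int):          # integer: explicit component count
        return n
    return max(int(np.sum(cum <= n)), 1)   # float: cumulative-variance rule

assert check_n_pca_components_sketch(0.3) == 1
assert check_n_pca_components_sketch(0.9) == 4
assert check_n_pca_components_sketch(1) == 1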
Example #47
    read_labels_from_annot(subject,
                           'aparc.a2009s',
                           hemi,
                           regexp='G_temp_sup-G_T_transv')[0]
    for hi, hemi in enumerate(('lh', 'rh'))
]
stc = stc.in_label(labels[0] + labels[1])
stc.data.fill(0)
stc.data[:, (stc.times >= pulse_tmin) & (stc.times <= pulse_tmax)] = 10e-9

# ############################################################################
# Simulate data

# Simulate data with movement
with warnings.catch_warnings(record=True):
    raw = Raw(fname_raw, allow_maxshield=True)
raw_movement = simulate_raw(raw,
                            stc,
                            trans,
                            src,
                            bem,
                            chpi=True,
                            head_pos=fname_pos_orig,
                            n_jobs=6,
                            verbose=True)

# Simulate data with no movement (use initial head position)
raw_stationary = simulate_raw(raw,
                              stc,
                              trans,
                              src,
Example #48
def _test_raw_reader(reader, test_preloading=True, **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, the preloaded,
        non-preloaded, and memory-mapped-to-file cases are all tested.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : Instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names)))[:10]
        bnd = min(int(round(raw.info['buffer_size_sec'] * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            for other_raw in other_raws:
                data1, times1 = raw[picks, sl_time]
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)

    full_data = raw._data
    assert_true(raw.__class__.__name__, repr(raw))  # to test repr
    assert_true(raw.info.__class__.__name__, repr(raw.info))

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = Raw(out_fname)
    assert_equal(set(raw.info.keys()), set(raw3.info.keys()))
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert_true(not math.isnan(raw3.info['highpass']))
    assert_true(not math.isnan(raw3.info['lowpass']))
    assert_true(not math.isnan(raw.info['highpass']))
    assert_true(not math.isnan(raw.info['lowpass']))

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert_equal(concat_raw.n_times, 2 * raw.n_times)
    assert_equal(concat_raw.first_samp, first_samp)
    assert_equal(concat_raw.last_samp - last_samp + first_samp, last_samp + 1)
    return raw
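
Typical usage is to hand the reader function and its keyword arguments straight to the helper; for example, with the KIT reader and the test files shown earlier in this document (the variables `sqd_path`, `mrk_path`, `elp_path`, and `hsp_path` are assumed to be in scope):

from mne.io import read_raw_kit

# Exercises preloaded, non-preloaded, and memory-mapped reads, then I/O
raw = _test_raw_reader(read_raw_kit, test_preloading=True,
                       input_fname=sqd_path, mrk=mrk_path,
                       elp=elp_path, hsp=hsp_path)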
Example #49
def test_apply_mne_inverse_epochs():
    """Test MNE with precomputed inverse operator on Epochs
    """
    inverse_operator = read_inverse_operator(fname_inv)
    label_lh = read_label(fname_label % 'Aud-lh')
    label_rh = read_label(fname_label % 'Aud-rh')
    event_id, tmin, tmax = 1, -0.2, 0.5
    raw = Raw(fname_raw)

    picks = pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       stim=True,
                       ecg=True,
                       eog=True,
                       include=['STI 014'],
                       exclude='bads')
    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
    flat = dict(grad=1e-15, mag=1e-15)

    events = read_events(fname_event)[:15]
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    reject=reject,
                    flat=flat)
    stcs = apply_inverse_epochs(epochs,
                                inverse_operator,
                                lambda2,
                                "dSPM",
                                label=label_lh,
                                pick_ori="normal")

    assert_true(len(stcs) == 4)
    assert_true(3 < stcs[0].data.max() < 10)
    assert_true(stcs[0].subject == 'sample')

    data = sum(stc.data for stc in stcs) / len(stcs)
    flip = label_sign_flip(label_lh, inverse_operator['src'])

    label_mean = np.mean(data, axis=0)
    label_mean_flip = np.mean(flip[:, np.newaxis] * data, axis=0)

    assert_true(label_mean.max() < label_mean_flip.max())

    # test extracting a BiHemiLabel
    stcs_rh = apply_inverse_epochs(epochs,
                                   inverse_operator,
                                   lambda2,
                                   "dSPM",
                                   label=label_rh,
                                   pick_ori="normal")
    stcs_bh = apply_inverse_epochs(epochs,
                                   inverse_operator,
                                   lambda2,
                                   "dSPM",
                                   label=label_lh + label_rh,
                                   pick_ori="normal")

    n_lh = len(stcs[0].data)
    assert_array_almost_equal(stcs[0].data, stcs_bh[0].data[:n_lh])
    assert_array_almost_equal(stcs_rh[0].data, stcs_bh[0].data[n_lh:])

    # test without using a label (so delayed computation is used)
    stcs = apply_inverse_epochs(epochs,
                                inverse_operator,
                                lambda2,
                                "dSPM",
                                pick_ori="normal")
    assert_true(stcs[0].subject == 'sample')
    label_stc = stcs[0].in_label(label_rh)
    assert_true(label_stc.subject == 'sample')
    assert_array_almost_equal(stcs_rh[0].data, label_stc.data)
Example #50
def make_evoked(fname, comp):
    raw = Raw(fname, compensation=comp)
    picks = pick_types(raw.info, meg=True, ref_meg=True)
    events = np.array([[0, 0, 1]], dtype=np.int)
    evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks).average()
    return evoked
Example #51
def test_resample():
    """Test resample (with I/O and multiple files)
    """
    tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 3, False)
    raw.load_data()
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2)
    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
    raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
    assert_equal(raw.n_times, raw_resamp.n_times / 2)
    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1)
    assert_equal(raw_resamp.info['sfreq'], sfreq)
    assert_equal(raw._data.shape, raw_resamp._data.shape)
    assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2, atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2, atol=1e-7)

    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10.)
    raw3.resample(10.)
    raw4.resample(10.)
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])

    # test resampling of stim channel

    # basic decimation
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(8.)._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation of multiple stim channels
    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
    assert_allclose(raw.resample(8.)._data,
                    [[1, 1, 0, 0, 1, 1, 0, 0],
                     [1, 1, 0, 0, 1, 1, 0, 0]])

    # decimation that could potentially drop events if the decimation is
    # done naively
    stim = [0, 0, 0, 1, 1, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    assert_allclose(raw.resample(4.)._data,
                    [[0, 1, 1, 0]])

    # two events are merged in this case (warning)
    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(8.)
        assert_true(len(w) == 1)

    # events are dropped in this case (warning)
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.resample(4.)
        assert_true(len(w) == 1)

    # test resampling events: this should no longer give a warning
    stim = [0, 1, 1, 0, 0, 1, 1, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    events = find_events(raw)
    raw, events = raw.resample(4., events=events)
    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))

    # test copy flag
    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
    raw_resampled = raw.resample(4., copy=True)
    assert_true(raw_resampled is not raw)
    raw_resampled = raw.resample(4., copy=False)
    assert_true(raw_resampled is raw)

    # resample should still work even when no stim channel is present
    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
    raw.resample(10)
    assert_true(len(raw) == 10)
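
The stim-channel cases are the subtle part of this test: naive decimation (keeping every n-th sample) can silently drop a short trigger pulse, which is why the resampled stim channel has to be reconstructed rather than sliced. A NumPy illustration of the failure mode the test guards against:

import numpy as np

stim = np.array([0, 0, 0, 1, 1, 0, 0, 0])   # one short event
naive = stim[1::4]                           # naive 4x decimation (offset 1)
assert naive.sum() == 0                      # the event vanished
# raw.resample(4.) above instead yields [0, 1, 1, 0], preserving the event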
Example #52
        'url': url,
        'folder_name': folder_name,
        'config_key': config_key
    }

    data_path = fetch_dataset(
        dataset_params
    )

    return os.path.join(data_path, archive_name)


# Download data
destination = download_sample_data(dataset="ssvep", subject=12, session=1)
# Read data in MNE Raw and numpy format
raw = Raw(destination, preload=True, verbose='ERROR')
events = find_events(raw, shortest_event=0, verbose=False)
raw = raw.pick_types(eeg=True)
event_id = {'13 Hz': 2, '17 Hz': 4, '21 Hz': 3, 'resting-state': 1}
sfreq = int(raw.info['sfreq'])

eeg_data = raw.get_data()

###############################################################################
# Visualization of raw EEG data
# -----------------------------
#
# Plot a few seconds of signal from the `Oz` electrode using matplotlib

n_seconds = 2
time = np.linspace(0, n_seconds, n_seconds * sfreq,
Example #53
def test_filter():
    """Test filtering (FIR and IIR) and Raw.apply_function interface
    """
    raw = Raw(fif_fname).crop(0, 7, False)
    raw.load_data()
    sig_dec = 11
    sig_dec_notch = 12
    sig_dec_notch_fit = 12
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]

    raw_lp = raw.copy()
    raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)

    raw_hp = raw.copy()
    raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)

    raw_bp = raw.copy()
    raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)

    raw_bs = raw.copy()
    raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)

    data, _ = raw[picks, :]

    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]

    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)

    raw_lp_iir = raw.copy()
    raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
    raw_hp_iir = raw.copy()
    raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
    raw_bp_iir = raw.copy()
    raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
    lp_data_iir, _ = raw_lp_iir[picks, :]
    hp_data_iir, _ = raw_hp_iir[picks, :]
    bp_data_iir, _ = raw_bp_iir[picks, :]
    summation = lp_data_iir + hp_data_iir + bp_data_iir
    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
                              sig_dec)

    # make sure we didn't touch other channels
    data, _ = raw[picks_meg[4:], :]
    bp_data, _ = raw_bp[picks_meg[4:], :]
    assert_array_equal(data, bp_data)
    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
    assert_array_equal(data, bp_data_iir)

    # do a very simple check on line filtering
    raw_bs = raw.copy()
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
        data_bs, _ = raw_bs[picks, :]
        raw_notch = raw.copy()
        raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
    data_notch, _ = raw_notch[picks, :]
    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)

    # now use the sinusoidal fitting
    raw_notch = raw.copy()
    raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
    data_notch, _ = raw_notch[picks, :]
    data, _ = raw[picks, :]
    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
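
The additivity assertions here (low-pass + band-pass + high-pass ≈ original, and band-pass + band-stop ≈ original) are the behaviour one expects from spectrally complementary linear-phase FIR filters with matching transition bands. A minimal, MNE-independent sketch of that property using SciPy:

import numpy as np
from scipy.signal import firwin

rng = np.random.RandomState(0)
x = rng.randn(2048)

h_lp = firwin(101, 0.2)     # linear-phase low-pass FIR (cutoff in Nyquist units)
h_hp = -h_lp                # spectral inversion ...
h_hp[50] += 1.0             # ... yields the complementary high-pass
y = np.convolve(x, h_lp) + np.convolve(x, h_hp)
# The two outputs sum to the delayed input, up to numerical precision
assert np.allclose(y[50:-50], x)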
Example #54
def test_raw_index_as_time():
    """ Test index as time conversion"""
    raw = Raw(fif_fname, preload=True)
    t0 = raw.index_as_time([0], True)[0]
    t1 = raw.index_as_time([100], False)[0]
    t2 = raw.index_as_time([100], True)[0]
    assert_equal(t2 - t1, t0)
    # ensure we can go back and forth
    t3 = raw.index_as_time(raw.time_as_index([0], True), True)
    assert_array_almost_equal(t3, [0.0], 2)
    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
    i0 = raw.time_as_index(raw.index_as_time([0], True), True)
    assert_equal(i0[0], 0)
    i1 = raw.time_as_index(raw.index_as_time([100], True), True)
    assert_equal(i1[0], 100)
    # Have to add small amount of time because we truncate via int casting
    i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
    assert_equal(i1[0], 100)
Example #55
def test_load_bad_channels():
    """Test reading/writing of bad channels
    """
    tempdir = _TempDir()
    # Load correctly marked file (manually done in mne_process_raw)
    raw_marked = Raw(fif_bad_marked_fname)
    correct_bads = raw_marked.info['bads']
    raw = Raw(test_fif_fname)
    # Make sure it starts clean
    assert_array_equal(raw.info['bads'], [])

    # Test normal case
    raw.load_bad_channels(bad_file_works)
    # Write it out, read it in, and check
    raw.save(op.join(tempdir, 'foo_raw.fif'))
    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
    assert_equal(correct_bads, raw_new.info['bads'])
    # Reset it
    raw.info['bads'] = []

    # Test bad case
    assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)

    # Test forcing the bad case
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw.load_bad_channels(bad_file_wrong, force=True)
        n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
        assert_equal(n_found, 1)  # there could be other irrelevant errors
        # write it out, read it in, and check
        raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
        raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
        assert_equal(correct_bads, raw_new.info['bads'])

    # Check that bad channels are cleared
    raw.load_bad_channels(None)
    raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
    assert_equal([], raw_new.info['bads'])
Example #56
def test_compute_proj_epochs():
    """Test SSP computation on epochs"""
    tempdir = _TempDir()
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = Raw(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                       exclude=[])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
    for p_fname in [proj_fname, proj_gz_fname,
                    op.join(tempdir, 'test-proj.fif.gz')]:
        projs2 = read_proj(p_fname)

        assert_true(len(projs) == len(projs2))

        for p1, p2 in zip(projs, projs2):
            assert_true(p1['desc'] == p2['desc'])
            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
            assert_true(p1['active'] == p2['active'])
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                mask = np.ones(p1_data.size, dtype=np.bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo-ave.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert_true(len(projs_evoked) == 2)
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2)
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)

    # test warnings on bad filenames
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_proj(proj_badname, projs)
        read_proj(proj_badname)
        print([ww.message for ww in w])
    assert_equal(len(w), 2)
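
The sign-flip normalization in the comparison above exists because SSP vectors, like any eigenvector-derived quantity, are only defined up to an overall sign: two otherwise identical projectors may differ by a factor of -1. A tiny sketch of a sign-invariant comparison:

import numpy as np

v1 = np.array([[0.6, -0.8, 0.0]])
v2 = -v1                                  # same projector, flipped sign
p1 = v1 * np.sign(v1[0, 0])               # normalize using the first entry
p2 = v2 * np.sign(v2[0, 0])
assert np.allclose(p1, p2)
assert abs(np.corrcoef(p1.ravel(), p2.ravel())[0, 1] - 1.0) < 1e-12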
Example #57
"""
# Author: Alexandre Gramfort <*****@*****.**>
#
# License: BSD (3-clause)

print(__doc__)

import mne
from mne.datasets import sample
from mne.io import Raw

data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'

# Reading events
raw = Raw(fname)

events = mne.find_events(raw, stim_channel='STI 014')

# Writing events
mne.write_events('events.fif', events)

for ind, before, after in events[:5]:
    print("At sample %d stim channel went from %d to %d" %
          (ind, before, after))

# Plot the events to get an idea of the paradigm
# Specify colors and an event_id dictionary for the legend.
event_id = {
    'aud_l': 1,
    'aud_r': 2,
Example #58
# License: BSD (3-clause)

print(__doc__)

import numpy as np
import mne

from mne.io import Raw
from mne.datasets import sample

data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

###############################################################################
# get raw data
raw = Raw(raw_fname)

# set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                       stim=False, exclude='bads')

# pick times relative to the onset of the MEG measurement.
start, stop = raw.time_as_index([100, 115], use_first_samp=False)

# export to nitime using a copy of the data
raw_ts = raw.to_nitime(start=start, stop=stop, picks=picks, copy=True)

###############################################################################
# explore some nitime timeseries features

# get start
Example #59
def test_needs_eeg_average_ref_proj():
    """Test checking whether a recording needs an EEG average reference"""
    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
    assert_true(_needs_eeg_average_ref_proj(raw.info))

    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
    assert_true(not _needs_eeg_average_ref_proj(raw.info))

    # No EEG channels
    raw = Raw(raw_fname, add_eeg_ref=False, preload=True)
    eeg = [raw.ch_names[c] for c in pick_types(raw.info, meg=False, eeg=True)]
    raw.drop_channels(eeg)
    assert_true(not _needs_eeg_average_ref_proj(raw.info))

    # Custom ref flag set
    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
    raw.info['custom_ref_applied'] = True
    assert_true(not _needs_eeg_average_ref_proj(raw.info))
Example #60
def test_io_raw():
    """Test IO for raw data (Neuromag + CTF + gz)
    """
    tempdir = _TempDir()
    # test unicode io
    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
        with Raw(fif_fname) as r:
            assert_true('Raw' in repr(r))
            desc1 = r.info['description'] = chars.decode('utf-8')
            temp_file = op.join(tempdir, 'raw.fif')
            r.save(temp_file, overwrite=True)
            with Raw(temp_file) as r2:
                desc2 = r2.info['description']
            assert_equal(desc1, desc2)

    # Let's construct a simple test for IO first
    raw = Raw(fif_fname).crop(0, 3.5, False)
    raw.load_data()
    # put in some data that we know the values of
    data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
    raw._data[:, :] = data
    # save it somewhere
    fname = op.join(tempdir, 'test_copy_raw.fif')
    raw.save(fname, buffer_size_sec=1.0)
    # read it in, make sure the whole thing matches
    raw = Raw(fname)
    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
    # let's read portions across the 1-sec tag boundary, too
    inds = raw.time_as_index([1.75, 2.25])
    sl = slice(inds[0], inds[1])
    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)

    # now let's do some real I/O
    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
    for fname_in, fname_out in zip(fnames_in, fnames_out):
        fname_out = op.join(tempdir, fname_out)
        raw = Raw(fname_in)

        nchan = raw.info['nchan']
        ch_names = raw.info['ch_names']
        meg_channels_idx = [k for k in range(nchan)
                            if ch_names[k][0] == 'M']
        n_channels = 100
        meg_channels_idx = meg_channels_idx[:n_channels]
        start, stop = raw.time_as_index([0, 5])
        data, times = raw[meg_channels_idx, start:(stop + 1)]
        meg_ch_names = [ch_names[k] for k in meg_channels_idx]

        # Set up pick list: MEG + STI 014 - bad channels
        include = ['STI 014']
        include += meg_ch_names
        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                           misc=True, ref_meg=True, include=include,
                           exclude='bads')

        # Writing with drop_small_buffer True
        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
                 drop_small_buffer=True, overwrite=True)
        raw2 = Raw(fname_out)

        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]
        assert_true(times2.max() <= 3)

        # Writing
        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)

        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_equal(len(raw.info['dig']), 146)

        raw2 = Raw(fname_out)

        sel = pick_channels(raw2.ch_names, meg_ch_names)
        data2, times2 = raw2[sel, :]

        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
        assert_allclose(times, times2)
        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)

        # check transformations
        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
            if raw.info[trans] is None:
                assert_true(raw2.info[trans] is None)
            else:
                assert_array_equal(raw.info[trans]['trans'],
                                   raw2.info[trans]['trans'])

                # check transformation 'from' and 'to'
                if trans.startswith('dev'):
                    from_id = FIFF.FIFFV_COORD_DEVICE
                else:
                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                if trans[4:8] == 'head':
                    to_id = FIFF.FIFFV_COORD_HEAD
                else:
                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                for raw_ in [raw, raw2]:
                    assert_equal(raw_.info[trans]['from'], from_id)
                    assert_equal(raw_.info[trans]['to'], to_id)

        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        raw.save(raw_badname)
        Raw(raw_badname)
    assert_true(len(w) > 0)  # len(w) should be 2 but Travis sometimes has more