Example #1
def test_saving_picked():
    """Test saving picked CTF instances."""
    temp_dir = _TempDir()
    out_fname = op.join(temp_dir, 'test_py_raw.fif')
    raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial))
    raw.crop(0, 1).load_data()
    assert raw.compensation_grade == get_current_comp(raw.info) == 0
    assert len(raw.info['comps']) == 5
    pick_kwargs = dict(meg=True, ref_meg=False, verbose=True)
    for comp_grade in [0, 1]:
        raw.apply_gradient_compensation(comp_grade)
        with catch_logging() as log:
            raw_pick = raw.copy().pick_types(**pick_kwargs)
        assert len(raw.info['comps']) == 5
        assert len(raw_pick.info['comps']) == 0
        log = log.getvalue()
        assert 'Removing 5 compensators' in log
        raw_pick.save(out_fname, overwrite=True)  # should work
        raw2 = read_raw_fif(out_fname)
        assert (raw_pick.ch_names == raw2.ch_names)
        assert_array_equal(raw_pick.times, raw2.times)
        assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
                        atol=1e-20)  # atol is very small but > 0

        raw2 = read_raw_fif(out_fname, preload=True)
        assert (raw_pick.ch_names == raw2.ch_names)
        assert_array_equal(raw_pick.times, raw2.times)
        assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
                        atol=1e-20)  # atol is very small but > 0
Example #2
def test_read_selection():
    """Test reading of selections."""
    # test one channel for each selection
    ch_names = ['MEG 2211', 'MEG 0223', 'MEG 1312', 'MEG 0412', 'MEG 1043',
                'MEG 2042', 'MEG 2032', 'MEG 0522', 'MEG 1031']
    sel_names = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
                 'Right-parietal', 'Left-occipital', 'Right-occipital',
                 'Left-frontal', 'Right-frontal']

    raw = read_raw_fif(raw_fname)
    for i, name in enumerate(sel_names):
        sel = read_selection(name)
        assert_true(ch_names[i] in sel)
        sel_info = read_selection(name, info=raw.info)
        assert_equal(sel, sel_info)

    # test some combinations
    all_ch = read_selection(['L', 'R'])
    left = read_selection('L')
    right = read_selection('R')

    assert_true(len(all_ch) == len(left) + len(right))
    assert_true(len(set(left).intersection(set(right))) == 0)

    frontal = read_selection('frontal')
    occipital = read_selection('Right-occipital')
    assert_true(len(set(frontal).intersection(set(occipital))) == 0)

    ch_names_new = [ch.replace(' ', '') for ch in ch_names]
    raw_new = read_raw_fif(raw_new_fname)
    for i, name in enumerate(sel_names):
        sel = read_selection(name, info=raw_new.info)
        assert_true(ch_names_new[i] in sel)

    assert_raises(TypeError, read_selection, name, info='foo')
Example #3
def test_chpi_subtraction():
    """Test subtraction of cHPI signals."""
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
    raw.info['bads'] = ['MEG0111']
    raw.del_proj()
    with catch_logging() as log:
        filter_chpi(raw, include_line=False, verbose=True)
    assert 'No average EEG' not in log.getvalue()
    assert '5 cHPI' in log.getvalue()
    # MaxFilter doesn't do quite as well as our algorithm with the last bit
    raw.crop(0, 16)
    # remove cHPI status chans
    raw_c = read_raw_fif(sss_hpisubt_fname).crop(0, 16).load_data()
    raw_c.pick_types(
        meg=True, eeg=True, eog=True, ecg=True, stim=True, misc=True)
    assert_meg_snr(raw, raw_c, 143, 624)

    # Degenerate cases
    raw_nohpi = read_raw_fif(test_fif_fname, preload=True)
    pytest.raises(RuntimeError, filter_chpi, raw_nohpi)

    # When MaxFilter downsamples, like::
    #     $ maxfilter -nosss -ds 2 -f test_move_anon_raw.fif \
    #           -o test_move_anon_ds2_raw.fif
    # it can strip out some values of info, which we emulate here:
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
    raw = raw.crop(0, 1).load_data().resample(600., npad='auto')
    raw.info['lowpass'] = 200.
    del raw.info['maxshield']
    del raw.info['hpi_results'][0]['moments']
    del raw.info['hpi_subsystem']['event_channel']
    with catch_logging() as log:
        filter_chpi(raw, verbose=True)
    pytest.raises(ValueError, filter_chpi, raw, t_window=-1)
    assert '2 cHPI' in log.getvalue()
Example #4
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions."""
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True,
                       add_eeg_ref=False)
    t -= raw.first_samp / raw.info['sfreq']
    quats = _calculate_chpi_positions(raw, verbose='debug')
    trans_est, rot_est, t_est = head_pos_to_trans_rot_t(quats)
    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est), 0.003)

    # degenerate conditions
    raw_no_chpi = read_raw_fif(test_fif_fname, add_eeg_ref=False)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = 999
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1., copy=False)
    with warnings.catch_warnings(record=True):  # bad pos
        with catch_logging() as log_file:
            _calculate_chpi_positions(raw_bad, verbose=True)
    # ignore HPI info header and [done] footer
    assert_true('0/5 good' in log_file.getvalue().strip().split('\n')[-2])

    # half the rate cuts off cHPI coils
    with warnings.catch_warnings(record=True):  # uint cast suggestion
        raw.resample(300., npad='auto')
    assert_raises_regex(RuntimeError, 'above the',
                        _calculate_chpi_positions, raw)
Example #5
def test_mne_c_design():
    """Test MNE-C filter design."""
    tempdir = _TempDir()
    temp_fname = op.join(tempdir, 'test_raw.fif')
    out_fname = op.join(tempdir, 'test_c_raw.fif')
    x = np.zeros((1, 10001))
    x[0, 5000] = 1.
    time_sl = slice(5000 - 4096, 5000 + 4097)
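    # Filtering a unit impulse yields the filter's impulse response, so the
    # 2 * 4096 + 1 = 8193-sample slice around the impulse can be compared
    # directly with the impulse response returned by design_mne_c_filter
    # (assumed here to have the same 8193-sample length, which the shape
    # check in assert_allclose would catch anyway).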
    sfreq = 1000.
    RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)

    tols = dict(rtol=1e-4, atol=1e-4)
    cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
           '--save', out_fname)
    run_subprocess(cmd)
    h = design_mne_c_filter(sfreq, None, 40)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
    h = design_mne_c_filter(sfreq, 5, 40, 2.5)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
    h = design_mne_c_filter(sfreq, 10, None, verbose=True)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)
Example #6
def test_bad_proj():
    """Test dealing with bad projection application."""
    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_fname)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[2:9:3]
    _check_warnings(raw, events, picks)
    # still bad
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    _check_warnings(raw, events)
    # "fixed"
    raw.info.normalize_proj()  # avoid projection warnings
    _check_warnings(raw, events, count=0)
    # eeg avg ref is okay
    raw = read_raw_fif(raw_fname, preload=True).pick_types(meg=False, eeg=True)
    raw.set_eeg_reference()
    _check_warnings(raw, events, count=0)
    raw.info['bads'] = raw.ch_names[:10]
    _check_warnings(raw, events, count=0)

    raw = read_raw_fif(raw_fname)
    assert_raises(ValueError, raw.del_proj, 'foo')
    n_proj = len(raw.info['projs'])
    raw.del_proj(0)
    assert_equal(len(raw.info['projs']), n_proj - 1)
    raw.del_proj()
    assert_equal(len(raw.info['projs']), 0)
Example #7
def test_set_eeg_reference():
    """Test rereference eeg data."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.info['projs'] = []

    # Test setting an average reference
    assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
    reref, ref_data = set_eeg_reference(raw)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_true(not reref.info['projs'][0]['active'])
    assert_true(ref_data is None)
    reref.apply_proj()
    eeg_chans = [raw.ch_names[ch]
                 for ch in pick_types(raw.info, meg=False, eeg=True)]
    _test_reference(raw, reref, ref_data,
                    [ch for ch in eeg_chans if ch not in raw.info['bads']])

    # Test setting an average reference when one was already present
    with warnings.catch_warnings(record=True):
        reref, ref_data = set_eeg_reference(raw, copy=False)
    assert_true(ref_data is None)

    # Test setting an average reference on non-preloaded data
    raw_nopreload = read_raw_fif(fif_fname, preload=False)
    raw_nopreload.info['projs'] = []
    reref, ref_data = set_eeg_reference(raw_nopreload)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_true(not reref.info['projs'][0]['active'])

    # Rereference raw data by creating a copy of original data
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test that data is modified in place when copy=False
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                        copy=False)
    assert_true(raw is reref)

    # Test moving from custom to average reference
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
    reref, _ = set_eeg_reference(reref)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_equal(reref.info['custom_ref_applied'], False)

    # When creating an average reference fails, make sure the
    # custom_ref_applied flag remains untouched.
    reref = raw.copy()
    reref.info['custom_ref_applied'] = True
    reref.pick_types(eeg=False)  # Cause making average ref fail
    assert_raises(ValueError, set_eeg_reference, reref)
    assert_true(reref.info['custom_ref_applied'])

    # Test moving from average to custom reference
    reref, ref_data = set_eeg_reference(raw)
    reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])
    assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
    assert_equal(reref.info['custom_ref_applied'], True)
Example #8
def test_set_channel_types():
    """Test set_channel_types"""
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog',
               'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg',
               'MEG 2442': 'hbo'}
    assert_raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    with warnings.catch_warnings(record=True):  # MEG channel change
        assert_raises(RuntimeError, raw2.set_channel_types, mapping)  # has prj
    raw2.add_proj([], remove_existing=True)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw2.set_channel_types(mapping)
    assert_true(len(w) >= 1, msg=[str(ww.message) for ww in w])
    assert_true(all('The unit for channel' in str(ww.message) for ww in w))
    info = raw2.info
    assert_true(info['chs'][372]['ch_name'] == 'EEG 058')
    assert_true(info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH)
    assert_true(info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH)
        assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V)
        assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH)
    assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL)
    assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO)

    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    assert_raises(ValueError, raw.set_channel_types, ch_types)
Example #9
def test_raw():
    """Test bti conversion to Raw object."""
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False)
        pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam',
                      preload=False)
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        ex = read_raw_fif(exported, preload=True)
        ra = read_raw_bti(pdf, config, hs, preload=False)
        assert ('RawBTi' in repr(ra))
        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                  ra.info['dev_head_t']['trans'], 7)
        assert len(ex.info['dig']) in (3563, 5154)
        assert_dig_allclose(ex.info, ra.info, limit=100)
        coil1, coil2 = [np.concatenate([d['loc'].flatten()
                                        for d in r_.info['chs'][:NCH]])
                        for r_ in (ra, ex)]
        assert_array_almost_equal(coil1, coil2, 7)

        loc1, loc2 = [np.concatenate([d['loc'].flatten()
                                      for d in r_.info['chs'][:NCH]])
                      for r_ in (ra, ex)]
        assert_allclose(loc1, loc2)

        assert_allclose(ra[:NCH][0], ex[:NCH][0])
        assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],
                           [c['range'] for c in ex.info['chs'][:NCH]])
        assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],
                           [c['cal'] for c in ex.info['chs'][:NCH]])
        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])

        # check our transforms
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            if ex.info[key] is None:
                pass
            else:
                assert (ra.info[key] is not None)
                for ent in ('to', 'from', 'trans'):
                    assert_allclose(ex.info[key][ent],
                                    ra.info[key][ent])

        ra.save(tmp_raw_fname)
        re = read_raw_fif(tmp_raw_fname)
        print(re)
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert (isinstance(re.info[key], dict))
            this_t = re.info[key]['trans']
            assert_equal(this_t.shape, (4, 4))
            # check that the matrix is not the identity
            assert (not np.allclose(this_t, np.eye(4)))
        os.remove(tmp_raw_fname)
Example #10
def test_hpi_info():
    """Test getting HPI info"""
    tempdir = _TempDir()
    temp_name = op.join(tempdir, 'temp_raw.fif')
    for fname in (chpi_fif_fname, sss_fif_fname):
        raw = read_raw_fif(fname, allow_maxshield='yes')
        assert_true(len(raw.info['hpi_subsystem']) > 0)
        raw.save(temp_name, overwrite=True)
        raw_2 = read_raw_fif(temp_name, allow_maxshield='yes')
        assert_equal(len(raw_2.info['hpi_subsystem']),
                     len(raw.info['hpi_subsystem']))
Example #11
def test_calculate_chpi_positions():
    """Test calculation of cHPI positions."""
    # Check to make sure our fits match MF decently
    mf_quats = read_head_pos(pos_fname)
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
    # This is a little hack (aliasing while decimating) to make it much faster
    # for testing purposes only. We can relax this later if we find it breaks
    # something.
    raw_dec = _decimate_chpi(raw, 15)
    with catch_logging() as log:
        py_quats = _calculate_chpi_positions(raw_dec, t_step_max=1.,
                                             verbose='debug')
    assert_true(log.getvalue().startswith('HPIFIT'))
    _assert_quats(py_quats, mf_quats, dist_tol=0.004, angle_tol=2.5)

    # degenerate conditions
    raw_no_chpi = read_raw_fif(test_fif_fname)
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
    raw_bad = raw.copy()
    del raw_bad.info['hpi_meas'][0]['hpi_coils'][0]['coil_freq']
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    raw_bad = raw.copy()
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
            break
    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
    for d in raw_bad.info['dig']:
        if d['kind'] == FIFF.FIFFV_POINT_HPI:
            d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
            d['r'] = np.ones(3)
    raw_bad.crop(0, 1.)
    picks = np.concatenate([np.arange(306, len(raw_bad.ch_names)),
                            pick_types(raw_bad.info, meg=True)[::16]])
    raw_bad.pick_channels([raw_bad.ch_names[pick] for pick in picks])
    with warnings.catch_warnings(record=True):  # bad pos
        with catch_logging() as log_file:
            _calculate_chpi_positions(raw_bad, t_step_min=1., verbose=True)
    # ignore HPI info header and [done] footer
    assert_true('0/5 good' in log_file.getvalue().strip().split('\n')[-2])

    # half the rate cuts off cHPI coils
    raw.info['lowpass'] /= 2.
    assert_raises_regex(RuntimeError, 'above the',
                        _calculate_chpi_positions, raw)

    # test on 5k artemis data
    raw = read_raw_artemis123(art_fname, preload=True)
    mf_quats = read_head_pos(art_mc_fname)
    with catch_logging() as log:
        py_quats = _calculate_chpi_positions(raw, t_step_min=2.,
                                             verbose='debug')
    _assert_quats(py_quats, mf_quats, dist_tol=0.004, angle_tol=2.5)
Example #12
def test_set_channel_types():
    """Test set_channel_types."""
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    pytest.raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    pytest.raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog',
               'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg',
               'MEG 2442': 'hbo'}
    pytest.raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    pytest.raises(RuntimeError, raw2.set_channel_types, mapping)  # has prj
    raw2.add_proj([], remove_existing=True)
    with pytest.warns(RuntimeWarning, match='The unit for channel'):
        raw2.set_channel_types(mapping)
    info = raw2.info
    assert info['chs'][372]['ch_name'] == 'EEG 058'
    assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH
    assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG
    assert info['chs'][373]['ch_name'] == 'EEG 059'
    assert info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH
    assert info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][374]['ch_name'] == 'EEG 060'
    assert info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH
    assert info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][375]['ch_name'] == 'EOG 061'
    assert info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH
    assert info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH
        assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V
        assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH
    assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL
    assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO

    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    pytest.raises(ValueError, raw.set_channel_types, ch_types)
Example #13
def test_read_selection():
    """Test reading of selections"""
    # test one channel for each selection
    ch_names = [
        "MEG 2211",
        "MEG 0223",
        "MEG 1312",
        "MEG 0412",
        "MEG 1043",
        "MEG 2042",
        "MEG 2032",
        "MEG 0522",
        "MEG 1031",
    ]
    sel_names = [
        "Vertex",
        "Left-temporal",
        "Right-temporal",
        "Left-parietal",
        "Right-parietal",
        "Left-occipital",
        "Right-occipital",
        "Left-frontal",
        "Right-frontal",
    ]

    raw = read_raw_fif(raw_fname)
    for i, name in enumerate(sel_names):
        sel = read_selection(name)
        assert_true(ch_names[i] in sel)
        sel_info = read_selection(name, info=raw.info)
        assert_equal(sel, sel_info)

    # test some combinations
    all_ch = read_selection(["L", "R"])
    left = read_selection("L")
    right = read_selection("R")

    assert_true(len(all_ch) == len(left) + len(right))
    assert_true(len(set(left).intersection(set(right))) == 0)

    frontal = read_selection("frontal")
    occipital = read_selection("Right-occipital")
    assert_true(len(set(frontal).intersection(set(occipital))) == 0)

    ch_names_new = [ch.replace(" ", "") for ch in ch_names]
    raw_new = read_raw_fif(raw_new_fname)
    for i, name in enumerate(sel_names):
        sel = read_selection(name, info=raw_new.info)
        assert_true(ch_names_new[i] in sel)

    assert_raises(TypeError, read_selection, name, info="foo")
Example #14
def test_cov_estimation_on_raw():
    """Test estimation from raw (typically empty room)"""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)

    # The pure string uses the more efficient numpy-based method, while the
    # list gets triaged to compute_covariance (should be equivalent, but uses
    # more memory)
    for method in (None, ['empirical']):  # None is cast to 'empirical'
        cov = compute_raw_covariance(raw, tstep=None, method=method)
        assert_equal(cov.ch_names, cov_mne.ch_names)
        assert_equal(cov.nfree, cov_mne.nfree)
        assert_snr(cov.data, cov_mne.data, 1e4)

        cov = compute_raw_covariance(raw, method=method)  # tstep=0.2 (default)
        assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
        assert_snr(cov.data, cov_mne.data, 1e2)

        # test IO when computation done in Python
        cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
        cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
        assert_true(cov_read.ch_names == cov.ch_names)
        assert_true(cov_read.nfree == cov.nfree)
        assert_array_almost_equal(cov.data, cov_read.data)

        # test with a subset of channels
        picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
        raw_pick = raw.copy().pick_channels(
            [raw.ch_names[pick] for pick in picks])
        raw_pick.info.normalize_proj()
        cov = compute_raw_covariance(raw_pick, picks=picks, tstep=None,
                                     method=method)
        assert_true(cov_mne.ch_names[:5] == cov.ch_names)
        assert_snr(cov.data, cov_mne.data[picks][:, picks], 1e4)
        cov = compute_raw_covariance(raw_pick, picks=picks, method=method)
        assert_snr(cov.data, cov_mne.data[picks][:, picks], 90)  # cutoff samps
        # make sure we get a warning with too short a segment
        raw_2 = read_raw_fif(raw_fname).crop(0, 1, copy=False)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            cov = compute_raw_covariance(raw_2, method=method)
        assert_true(any('Too few samples' in str(ww.message) for ww in w))
        # no epochs found due to rejection
        assert_raises(ValueError, compute_raw_covariance, raw, tstep=None,
                      method='empirical', reject=dict(eog=200e-6))
        # but this should work
        cov = compute_raw_covariance(raw.copy().crop(0, 10., copy=False),
                                     tstep=None, method=method,
                                     reject=dict(eog=1000e-6))
Example #15
def test_cov_estimation_on_raw(method, tmpdir):
    """Test estimation from raw (typically empty room)."""
    tempdir = str(tmpdir)
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)

    # The pure string uses the more efficient numpy-based method, while the
    # list gets triaged to compute_covariance (should be equivalent, but uses
    # more memory)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, tstep=None, method=method,
                                     rank='full')
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e4)

    # tstep=0.2 (default)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, method=method, rank='full')
    assert_equal(cov.nfree, cov_mne.nfree - 119)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 1e2)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert cov_read.ch_names == cov.ch_names
    assert cov_read.nfree == cov.nfree
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
    raw_pick.info.normalize_proj()
    cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
                                 rank='full')
    assert cov_mne.ch_names[:5] == cov.ch_names
    assert_snr(cov.data, cov_mne.data[:5, :5], 1e4)
    cov = compute_raw_covariance(raw_pick, method=method, rank='full')
    assert_snr(cov.data, cov_mne.data[:5, :5], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = read_raw_fif(raw_fname).crop(0, 1)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw_2, method=method)
    # no epochs found due to rejection
    pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
                  method='empirical', reject=dict(eog=200e-6))
    # but this should work
    cov = compute_raw_covariance(raw.copy().crop(0, 10.),
                                 tstep=None, method=method,
                                 reject=dict(eog=1000e-6),
                                 verbose='error')
Example #16
def test_get_set_sensor_positions():
    """Test get/set functions for sensor positions."""
    raw1 = read_raw_fif(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)

    ch_name = raw1.info['ch_names'][13]
    pytest.raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    raw2 = read_raw_fif(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])
Example #17
def test_apply_mne_inverse_raw():
    """Test MNE with precomputed inverse operator on Raw."""
    start = 3
    stop = 10
    raw = read_raw_fif(fname_raw)
    label_lh = read_label(fname_label % 'Aud-lh')
    _, times = raw[0, start:stop]
    inverse_operator = read_inverse_operator(fname_full)
    inverse_operator = prepare_inverse_operator(inverse_operator, nave=1,
                                                lambda2=lambda2, method="dSPM")
    for pick_ori in [None, "normal", "vector"]:
        stc = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
                                label=label_lh, start=start, stop=stop, nave=1,
                                pick_ori=pick_ori, buffer_size=None,
                                prepared=True)

        stc2 = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
                                 label=label_lh, start=start, stop=stop,
                                 nave=1, pick_ori=pick_ori,
                                 buffer_size=3, prepared=True)

        if pick_ori is None:
            assert_true(np.all(stc.data > 0))
            assert_true(np.all(stc2.data > 0))

        assert_true(stc.subject == 'sample')
        assert_true(stc2.subject == 'sample')
        assert_array_almost_equal(stc.times, times)
        assert_array_almost_equal(stc2.times, times)
        assert_array_almost_equal(stc.data, stc2.data)
Example #18
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        with warnings.catch_warnings(record=True) as w:
            compute_covariance([epochs, epochs_2])
            assert_equal(len(w), 0)
            if kind == 'shift':
                epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
            else:  # None
                epochs_2.info['dev_head_t'] = None
            assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='ignore')
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
            assert_raises(ValueError, compute_covariance, epochs,
                          on_mismatch='x')
        assert_true(any('transform mismatch' in str(ww.message) for ww in w))
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
Example #19
def test_calculate_chpi_coil_locs():
    """Test computing just cHPI locations."""
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
    # This is a little hack (aliasing while decimating) to make it much faster
    # for testing purposes only. We can relax this later if we find it breaks
    # something.
    raw_dec = _decimate_chpi(raw, 15)
    times, cHPI_digs = _calculate_chpi_coil_locs(raw_dec, verbose='debug')

    # spot check
    assert_allclose(times[9], 9.9, atol=1e-3)
    assert_allclose(cHPI_digs[9][2]['r'],
                    [-0.01937833, 0.00346804, 0.06331209], atol=1e-3)
    assert_allclose(cHPI_digs[9][2]['gof'], 0.9957976, atol=1e-3)

    assert_allclose(cHPI_digs[9][4]['r'],
                    [0.05442122, 0.00997692, 0.03721696], atol=1e-3)
    assert_allclose(cHPI_digs[9][4]['gof'], 0.075700080794629199, atol=1e-3)

    # test on 5k artemis data
    raw = read_raw_artemis123(art_fname, preload=True)
    times, cHPI_digs = _calculate_chpi_coil_locs(raw, verbose='debug')

    assert_allclose(times[2], 2.9, atol=1e-3)
    assert_allclose(cHPI_digs[2][0]['gof'], 0.9980471794552791, atol=1e-3)
    assert_allclose(cHPI_digs[2][0]['r'],
                    [-0.0157762, 0.06655744, 0.00545172], atol=1e-3)
Example #20
def test_unsupervised_spatial_filter():
    """Test unsupervised spatial filter."""
    from sklearn.decomposition import PCA
    from sklearn.kernel_ridge import KernelRidge
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)

    # Test estimator
    assert_raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))

    # Test fit
    X = epochs.get_data()
    n_components = 4
    usf = UnsupervisedSpatialFilter(PCA(n_components))
    usf.fit(X)
    usf1 = UnsupervisedSpatialFilter(PCA(n_components))

    # test transform
    assert_equal(usf.transform(X).ndim, 3)
    # test fit_transform
    assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
    # assert shape
    assert_equal(usf.transform(X).shape[1], n_components)

    # Test with average param
    usf = UnsupervisedSpatialFilter(PCA(4), average=True)
    usf.fit_transform(X)
    assert_raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
Example #21
def test_define_events():
    """Test defining response events."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname)
    events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
                                      .2, 0.7, 42, 99)
    n_target = events[events[:, 2] == 5].shape[0]
    n_miss = events_[events_[:, 2] == 99].shape[0]
    n_target_ = events_[events_[:, 2] == 42].shape[0]

    assert_true(n_target_ == (n_target - n_miss))

    events = np.array([[0, 0, 1],
                       [375, 0, 2],
                       [500, 0, 1],
                       [875, 0, 3],
                       [1000, 0, 1],
                       [1375, 0, 3],
                       [1100, 0, 1],
                       [1475, 0, 2],
                       [1500, 0, 1],
                       [1875, 0, 2]])
    true_lag_nofill = [1500., 1500., 1500.]
    true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]
    n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
    n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)
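    # For reference: at sfreq=250 Hz the 1.4-1.6 s window is 350-400 samples,
    # so e.g. the id-1 event at sample 0 followed by the id-2 event at sample
    # 375 gives a lag of 375 / 250 = 1.5 s = 1500 ms, hence the 1500. entries;
    # id-1 events with no id-2 follower in the window are dropped, or their
    # lag becomes np.nan when the fill value (99) is passed.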

    assert_array_equal(true_lag_fill, lag_fill)
    assert_array_equal(true_lag_nofill, lag_nofill)
Example #22
def test_plot_ica_overlay():
    """Test plotting of ICA cleaning."""
    import matplotlib.pyplot as plt
    raw = _get_raw(preload=True)
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    # can't use info.normalize_proj here because of how and when ICA and Epochs
    # objects do picking of Raw data
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # don't test raw, needs preload ...
    with pytest.warns(RuntimeWarning, match='projection'):
        ecg_epochs = create_ecg_epochs(raw, picks=picks)
    ica.plot_overlay(ecg_epochs.average())
    with pytest.warns(RuntimeWarning, match='projection'):
        eog_epochs = create_eog_epochs(raw, picks=picks)
    ica.plot_overlay(eog_epochs.average())
    pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
    ica.plot_overlay(raw)
    plt.close('all')

    # smoke test for CTF
    raw = read_raw_fif(raw_ctf_fname)
    raw.apply_gradient_compensation(3)
    picks = pick_types(raw.info, meg=True, ref_meg=False)
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=picks)
    with pytest.warns(RuntimeWarning, match='longer than'):
        ecg_epochs = create_ecg_epochs(raw)
    ica.plot_overlay(ecg_epochs.average())
    plt.close('all')
Example #23
def test_eog_channel(method):
    """Test that EOG channel is included when performing ICA."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
                       eog=True, exclude='bads')
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    n_components = 0.9
    ica = ICA(n_components=n_components, method=method)
    # Test case for MEG and EOG data. Should have EOG channel
    for inst in [raw, epochs]:
        picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')[:4]
        picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
                             eog=True, exclude='bads')
        picks1 = np.append(picks1a, picks1b)
        ica.fit(inst, picks=picks1)
        assert (any('EOG' in ch for ch in ica.ch_names))
    # Test case for MEG data. Should have no EOG channel
    for inst in [raw, epochs]:
        picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
                            eog=False, exclude='bads')[:5]
        ica.fit(inst, picks=picks1)
        assert not any('EOG' in ch for ch in ica.ch_names)
Example #24
def test_n_components_and_max_pca_components_none(method):
    """Test n_components and max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)

    max_pca_components = None
    n_components = None
    random_state = 12345

    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')
    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with pytest.warns(None):  # convergence
        ica.fit(epochs)
    ica.save(output_fname)

    ica = read_ica(output_fname)

    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value.
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    assert ica.n_components is None
Example #25
def test_ica_rank_reduction(method):
    """Test recovery ICA rank reduction."""
    _skip_check_picard(method)
    # Most basic recovery
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with pytest.warns(UserWarning, match='did not converge'):
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1).fit(raw, picks=picks)

        rank_before = _compute_rank_int(raw.copy().pick(picks), proj=False)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw.copy())
        rank_after = _compute_rank_int(raw_clean.copy().pick(picks),
                                       proj=False)
        # The interaction between ICA rejection and PCA components is
        # difficult to predict: rank_after often seems to be 1 higher than
        # n_pca_components
        assert (n_components < n_pca_components <= rank_after <=
                rank_before)
Example #26
def test_ica_reset(method):
    """Test ICA resetting."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]

    run_time_attrs = (
        'pre_whitener_',
        'unmixing_matrix_',
        'mixing_matrix_',
        'n_components_',
        'n_samples_',
        'pca_components_',
        'pca_explained_variance_',
        'pca_mean_'
    )
    with pytest.warns(UserWarning, match='did not converge'):
        ica = ICA(
            n_components=3, max_pca_components=3, n_pca_components=3,
            method=method, max_iter=1).fit(raw, picks=picks)

    assert (all(hasattr(ica, attr) for attr in run_time_attrs))
    assert ica.labels_ is not None
    ica._reset()
    assert (not any(hasattr(ica, attr) for attr in run_time_attrs))
    assert ica.labels_ is not None
Example #27
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull."""
    subject = 'sample'
    raw = read_raw_fif(fname_raw, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1

    simulated_scalp_map = simulated_scalp_map[:, None]

    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    bem = read_bem_solution(fname_bem)
    dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
                               min_dist=min_dist)

    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    # Constraints are not exact, so bump the minimum slightly
    assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))

    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
                  -1.)
Example #28
def test_find_ch_connectivity():
    """Test computing the connectivity matrix."""
    data_path = testing.data_path()

    raw = read_raw_fif(raw_fname, preload=True)
    sizes = {'mag': 828, 'grad': 1700, 'eeg': 386}
    nchans = {'mag': 102, 'grad': 204, 'eeg': 60}
    for ch_type in ['mag', 'grad', 'eeg']:
        conn, ch_names = find_ch_connectivity(raw.info, ch_type)
        # Silly test for checking the number of neighbors.
        assert_equal(conn.getnnz(), sizes[ch_type])
        assert_equal(len(ch_names), nchans[ch_type])
    pytest.raises(ValueError, find_ch_connectivity, raw.info, None)

    # Test computing the conn matrix with gradiometers.
    conn, ch_names = _compute_ch_connectivity(raw.info, 'grad')
    assert_equal(conn.getnnz(), 2680)

    # Test ch_type=None.
    raw.pick_types(meg='mag')
    find_ch_connectivity(raw.info, None)

    bti_fname = op.join(data_path, 'BTi', 'erm_HFH', 'c,rfDC')
    bti_config_name = op.join(data_path, 'BTi', 'erm_HFH', 'config')
    raw = read_raw_bti(bti_fname, bti_config_name, None)
    _, ch_names = find_ch_connectivity(raw.info, 'mag')
    assert 'A1' in ch_names

    ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
    raw = read_raw_ctf(ctf_fname)
    _, ch_names = find_ch_connectivity(raw.info, 'mag')
    assert 'MLC11' in ch_names

    pytest.raises(ValueError, find_ch_connectivity, raw.info, 'eog')
Example #29
def test_ch_loc():
    """Test raw kit loc."""
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path,
                          stim='<')
    raw_bin = read_raw_fif(op.join(data_dir, 'test_bin_raw.fif'))

    ch_py = np.array([ch['loc'] for ch in
                      raw_py._raw_extras[0]['channels'][:160]])
    # ch locs stored as m, not mm
    ch_py[:, :3] *= 1e3
    ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
    assert_array_almost_equal(ch_py, ch_sns, 2)

    assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
                              raw_bin.info['dev_head_t']['trans'], 4)
    for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
        if bin_ch['ch_name'].startswith('MEG'):
            # the stored ch locs have more precision than the sns.txt
            assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)

    # test when more than one marker file provided
    mrks = [mrk_path, mrk2_path, mrk3_path]
    read_raw_kit(sqd_path, mrks, elp_txt_path, hsp_txt_path, preload=False)
    # this dataset does not have the equivalent set of points :(
    raw_bin.info['dig'] = raw_bin.info['dig'][:8]
    raw_py.info['dig'] = raw_py.info['dig'][:8]
    assert_dig_allclose(raw_py.info, raw_bin.info)
Example #30
def test_drop_channels():
    """Test if dropping channels works with various arguments."""
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1)
    raw.drop_channels(["MEG 0111"])  # list argument
    raw.drop_channels("MEG 0112")  # str argument
    pytest.raises(ValueError, raw.drop_channels, ["MEG 0111", 5])
    pytest.raises(ValueError, raw.drop_channels, 5)  # must be list or str
Example #31
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
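    # Roughly: mask_type='not_and' clears the mask bits from the trigger
    # values (value & ~mask), while 'and' keeps only those bits
    # (value & mask). E.g. with mask=1 and 'not_and', samples 0..4 become
    # 0, 0, 2, 2, 4, yielding the onsets [2, 0, 2] and [4, 2, 4] asserted
    # below.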

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='not_and'),
        [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='not_and'),
        [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='not_and'),
        [[4, 0, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='not_and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='and'),
        [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='and'),
        [[2, 0, 2]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='and'),
        [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6
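    # Here consecutive=False keeps only onsets preceded by zero (samples 10,
    # 40, 14399), consecutive=True also reports the 5 -> 6 and 6 -> 5
    # transitions at samples 20 and 30, and the default ('increasing') keeps
    # increasing steps like 5 -> 6 but drops decreasing ones, as the expected
    # arrays below show.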

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, consecutive=True),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True),
        [[19, 6, 5], [29, 5, 6], [31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    pytest.raises(ValueError,
                  find_events,
                  raw,
                  output='step',
                  consecutive=True)
    assert_array_equal(
        find_events(raw, output='step', consecutive=True, shortest_event=1),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0], [40, 0, 6],
         [41, 6, 0], [14399, 0, 9], [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(
        find_events(raw,
                    output='offset',
                    consecutive=False,
                    min_duration=0.002), [[31, 0, 5]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True,
                    min_duration=0.002), [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(
        find_stim_steps(raw, pad_start=0, merge=0, stim_channel=stim_channel),
        [[0, 0, 1], [1, 1, 0], [10, 0, 4], [11, 4, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=-1, stim_channel=stim_channel),
        [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=1, stim_channel=stim_channel),
        [[1, 1, 0], [11, 0, 5], [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])

    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
Example #32
def test_getitem_epochsTFR():
    """Test GetEpochsMixin in the context of EpochsTFR."""
    from pandas import DataFrame
    # Setup for reading the raw data and select a few trials
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)
    # create fake data, test with and without dropping epochs
    for n_drop_epochs in [0, 2]:
        n_events = 12
        # create fake metadata
        rng = np.random.RandomState(42)
        rt = rng.uniform(size=(n_events, ))
        trialtypes = np.array(['face', 'place'])
        trial = trialtypes[(rng.uniform(size=(n_events, )) > .5).astype(int)]
        meta = DataFrame(dict(RT=rt, Trial=trial))
        event_id = dict(a=1, b=2, c=3, d=4)
        epochs = Epochs(raw,
                        events[:n_events],
                        event_id=event_id,
                        metadata=meta,
                        decim=1)
        epochs.drop(np.arange(n_drop_epochs))
        n_events -= n_drop_epochs

        freqs = np.arange(12., 17., 2.)  # define frequencies of interest
        n_cycles = freqs / 2.  # 0.5 second time windows for all frequencies

        # Choose time x (full) bandwidth product
        time_bandwidth = 4.0
        # With 0.5 s time windows, this gives 8 Hz smoothing
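        # (each window lasts n_cycles / freq = 0.5 s, so the full smoothing
        # bandwidth is time_bandwidth / t_window = 4.0 / 0.5 = 8 Hz)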
        kwargs = dict(freqs=freqs,
                      n_cycles=n_cycles,
                      use_fft=True,
                      time_bandwidth=time_bandwidth,
                      return_itc=False,
                      average=False,
                      n_jobs=1)
        power = tfr_multitaper(epochs, **kwargs)

        # Check that power and epochs metadata is the same
        assert_metadata_equal(epochs.metadata, power.metadata)
        assert_metadata_equal(epochs[::2].metadata, power[::2].metadata)
        assert_metadata_equal(epochs['RT < .5'].metadata,
                              power['RT < .5'].metadata)
        assert_array_equal(epochs.selection, power.selection)
        assert epochs.drop_log == power.drop_log

        # Check that get power is functioning
        assert_array_equal(power[3:6].data, power.data[3:6])
        assert_array_equal(power[3:6].events, power.events[3:6])
        assert_array_equal(epochs.selection[3:6], power.selection[3:6])

        indx_check = (power.metadata['Trial'] == 'face')
        try:
            indx_check = indx_check.to_numpy()
        except Exception:
            pass  # older Pandas
        indx_check = indx_check.nonzero()
        assert_array_equal(power['Trial == "face"'].events,
                           power.events[indx_check])
        assert_array_equal(power['Trial == "face"'].data,
                           power.data[indx_check])

        # Check that the wrong Key generates a Key Error for Metadata search
        with pytest.raises(KeyError):
            power['Trialz == "place"']

        # Test length function
        assert len(power) == n_events
        assert len(power[3:6]) == 3

        # Test iteration function
        for ind, power_ep in enumerate(power):
            assert_array_equal(power_ep, power.data[ind])
            if ind == 5:
                break

        # Test that current state is maintained
        assert_array_equal(power.next(), power.data[ind + 1])

    # Check decim affects sfreq
    power_decim = tfr_multitaper(epochs, decim=2, **kwargs)
    assert power.info['sfreq'] / 2. == power_decim.info['sfreq']
Example #33
def test_time_frequency():
    """Test time-frequency transform (PSD and ITC)."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info,
                       meg='grad',
                       eeg=False,
                       stim=False,
                       include=include,
                       exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0],
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)

    # Now compute evoked
    evoked = epochs.average()
    pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)
    power_, itc_ = tfr_morlet(epochs,
                              freqs=freqs,
                              n_cycles=n_cycles,
                              use_fft=True,
                              return_itc=True,
                              decim=slice(0, 2))
    # Test picks argument and average parameter
    pytest.raises(ValueError,
                  tfr_morlet,
                  epochs,
                  freqs=freqs,
                  n_cycles=n_cycles,
                  return_itc=True,
                  average=False)

    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)

    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_allclose(power.data, power_picks.data)
    assert_allclose(power.data, power_picks_avg.data)
    assert_allclose(itc.data, itc_picks.data)

    # test on evoked
    power_evoked = tfr_morlet(evoked,
                              freqs,
                              n_cycles,
                              use_fft=True,
                              return_itc=False)
    # one is squared magnitude of the average (evoked) and
    # the other is average of the squared magnitudes (epochs PSD)
    # so values shouldn't match, but shapes should
    assert_array_equal(power.data.shape, power_evoked.data.shape)
    pytest.raises(AssertionError, assert_allclose, power.data,
                  power_evoked.data)

    # complex output
    pytest.raises(ValueError,
                  tfr_morlet,
                  epochs,
                  freqs,
                  n_cycles,
                  return_itc=False,
                  average=True,
                  output="complex")
    pytest.raises(ValueError,
                  tfr_morlet,
                  epochs,
                  freqs,
                  n_cycles,
                  output="complex",
                  average=False,
                  return_itc=True)
    epochs_power_complex = tfr_morlet(epochs,
                                      freqs,
                                      n_cycles,
                                      output="complex",
                                      average=False,
                                      return_itc=False)
    epochs_amplitude_2 = abs(epochs_power_complex)
    epochs_amplitude_3 = epochs_amplitude_2.copy()
    epochs_amplitude_3.data[:] = np.inf  # test that it's actually copied

    # test that the power computed via `complex` is equivalent to power
    # computed within the method.
    assert_allclose(epochs_amplitude_2.data**2, epochs_power_picks.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub
    ret = itc * 23  # test mult
    itc = ret / 23  # test div

    power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert 'meg' in power
    assert 'grad' in power
    assert 'mag' not in power
    assert 'eeg' not in power

    assert power.nave == nave
    assert itc.nave == nave
    assert (power.data.shape == (len(picks), len(freqs), len(times)))
    assert (power.data.shape == itc.data.shape)
    assert (power_.data.shape == (len(picks), len(freqs), 2))
    assert (power_.data.shape == itc_.data.shape)
    assert (np.sum(itc.data >= 1) == 0)
    assert (np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert gave.data.shape == (itc2.data.shape[0] - 1, itc2.data.shape[1],
                               itc2.data.shape[2])
    assert itc2.ch_names[1:] == gave.ch_names
    assert gave.nave == 2
    itc2.drop_channels(itc2.info["bads"])
    assert_allclose(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_allclose(combined_itc.data,
                    np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=2,
                            use_fft=False,
                            return_itc=True)

    assert (power.data.shape == (len(picks), len(freqs), len(times)))
    assert (power.data.shape == itc.data.shape)
    assert (np.sum(itc.data >= 1) == 0)
    assert (np.sum(itc.data <= 0) == 0)

    tfr = tfr_morlet(epochs[0],
                     freqs,
                     use_fft=True,
                     n_cycles=2,
                     average=False,
                     return_itc=False)
    tfr_data = tfr.data[0]
    assert (tfr_data.shape == (len(picks), len(freqs), len(times)))
    tfr2 = tfr_morlet(epochs[0],
                      freqs,
                      use_fft=True,
                      n_cycles=2,
                      decim=slice(0, 2),
                      average=False,
                      return_itc=False).data[0]
    assert (tfr2.shape == (len(picks), len(freqs), 2))

    single_power = tfr_morlet(epochs,
                              freqs,
                              2,
                              average=False,
                              return_itc=False).data
    single_power2 = tfr_morlet(epochs,
                               freqs,
                               2,
                               decim=slice(0, 2),
                               average=False,
                               return_itc=False).data
    single_power3 = tfr_morlet(epochs,
                               freqs,
                               2,
                               decim=slice(1, 3),
                               average=False,
                               return_itc=False).data
    single_power4 = tfr_morlet(epochs,
                               freqs,
                               2,
                               decim=slice(2, 4),
                               average=False,
                               return_itc=False).data

    assert_allclose(np.mean(single_power, axis=0), power.data)
    assert_allclose(np.mean(single_power2, axis=0), power.data[:, :, :2])
    assert_allclose(np.mean(single_power3, axis=0), power.data[:, :, 1:3])
    assert_allclose(np.mean(single_power4, axis=0), power.data[:, :, 2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    power_pick, power_drop = mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs,
                                    freqs=freqs,
                                    n_cycles=2,
                                    use_fft=use_fft,
                                    return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = list(range(50, 55))
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = tfr_morlet(epochs[0],
                     freqs,
                     2.,
                     decim=decim,
                     average=False,
                     return_itc=False).data[0]
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            cwt(data[0], Ws, use_fft=use_fft, mode=mode)

    # Test invalid frequency arguments
    with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
        tfr_morlet(epochs, freqs=np.arange(0, 3), n_cycles=7)
    with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
        tfr_morlet(epochs, freqs=np.arange(-4, -1), n_cycles=7)

    # Test decim parameter checks
    pytest.raises(TypeError,
                  tfr_morlet,
                  epochs,
                  freqs=freqs,
                  n_cycles=n_cycles,
                  use_fft=True,
                  return_itc=True,
                  decim='decim')

    # When convolving in time, wavelets must not be longer than the data
    pytest.raises(ValueError,
                  cwt,
                  data[0, :, :Ws[0].size - 1],
                  Ws,
                  use_fft=False)
    with pytest.warns(UserWarning, match='one of the wavelets.*is longer'):
        cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)

    # Check for off-by-one errors when using wavelets with an even number of
    # samples
    psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')
    assert_equal(psd.shape, (2, 1, 420))
Example #34
def test_set_eeg_reference():
    """Test rereference eeg data."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.info['projs'] = []

    # Test setting an average reference projection
    assert (not _has_eeg_average_ref_proj(raw.info['projs']))
    reref, ref_data = set_eeg_reference(raw, projection=True)
    assert (_has_eeg_average_ref_proj(reref.info['projs']))
    assert (not reref.info['projs'][0]['active'])
    assert (ref_data is None)
    reref.apply_proj()
    eeg_chans = [raw.ch_names[ch]
                 for ch in pick_types(raw.info, meg=False, eeg=True)]
    _test_reference(raw, reref, ref_data,
                    [ch for ch in eeg_chans if ch not in raw.info['bads']])

    # Test setting an average reference when one was already present
    with pytest.warns(RuntimeWarning, match='untouched'):
        reref, ref_data = set_eeg_reference(raw, copy=False, projection=True)
    assert ref_data is None

    # Test setting an average reference on non-preloaded data
    raw_nopreload = read_raw_fif(fif_fname, preload=False)
    raw_nopreload.info['projs'] = []
    reref, ref_data = set_eeg_reference(raw_nopreload, projection=True)
    assert (_has_eeg_average_ref_proj(reref.info['projs']))
    assert (not reref.info['projs'][0]['active'])

    # Rereference raw data by creating a copy of original data
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
    assert (reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test that data is modified in place when copy=False
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                        copy=False)
    assert (raw is reref)

    # Test moving from custom to average reference
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
    reref, _ = set_eeg_reference(reref, projection=True)
    assert (_has_eeg_average_ref_proj(reref.info['projs']))
    assert_equal(reref.info['custom_ref_applied'], False)

    # When creating an average reference fails, make sure the
    # custom_ref_applied flag remains untouched.
    reref = raw.copy()
    reref.info['custom_ref_applied'] = True
    reref.pick_types(eeg=False)  # Cause making average ref fail
    pytest.raises(ValueError, set_eeg_reference, reref, projection=True)
    assert (reref.info['custom_ref_applied'])

    # Test moving from average to custom reference
    reref, ref_data = set_eeg_reference(raw, projection=True)
    reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])
    assert not _has_eeg_average_ref_proj(reref.info['projs'])
    assert len(reref.info['projs']) == 0
    assert_equal(reref.info['custom_ref_applied'], True)

    # Test that disabling the reference does not change the data
    assert _has_eeg_average_ref_proj(raw.info['projs'])
    reref, _ = set_eeg_reference(raw, [])
    assert_array_equal(raw._data, reref._data)
    assert not _has_eeg_average_ref_proj(reref.info['projs'])

    # make sure ref_channels=[] removes average reference projectors
    assert _has_eeg_average_ref_proj(raw.info['projs'])
    reref, _ = set_eeg_reference(raw, [])
    assert (not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that average reference gives identical results when calculated
    # via SSP projection (projection=True) or directly (projection=False)
    raw.info['projs'] = []
    reref_1, _ = set_eeg_reference(raw.copy(), projection=True)
    reref_1.apply_proj()
    reref_2, _ = set_eeg_reference(raw.copy(), projection=False)
    assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15)

    # Test average reference without projection
    reref, ref_data = set_eeg_reference(raw.copy(), ref_channels="average",
                                        projection=False)
    _test_reference(raw, reref, ref_data, eeg_chans)

    with pytest.raises(ValueError, match='supported for ref_channels="averag'):
        set_eeg_reference(raw, [], True, True)
    with pytest.raises(ValueError, match='supported for ref_channels="averag'):
        set_eeg_reference(raw, ['EEG 001'], True, True)

    # gh-6454
    rng = np.random.RandomState(0)
    data = rng.randn(3, 1000)
    raw = RawArray(data, create_info(3, 1000., ['ecog'] * 2 + ['misc']))
    reref, ref_data = set_eeg_reference(raw.copy())
    _test_reference(raw, reref, ref_data, ['0', '1'])
Example #35
def test_set_channel_types():
    """Test set_channel_types"""
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    assert_raises(ValueError, raw.set_channel_types, mapping)
    # Test changing type if in proj (avg eeg ref here)
    mapping = {
        'EEG 058': 'ecog',
        'EEG 059': 'ecg',
        'EEG 060': 'eog',
        'EOG 061': 'seeg',
        'MEG 2441': 'eeg',
        'MEG 2443': 'eeg',
        'MEG 2442': 'hbo'
    }
    assert_raises(RuntimeError, raw.set_channel_types, mapping)
    # Test type change
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    with warnings.catch_warnings(record=True):  # MEG channel change
        assert_raises(RuntimeError, raw2.set_channel_types, mapping)  # has projs
    raw2.add_proj([], remove_existing=True)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw2.set_channel_types(mapping)
    assert_true(len(w) >= 1, msg=[str(ww.message) for ww in w])
    assert_true(all('The unit for channel' in str(ww.message) for ww in w))
    info = raw2.info
    assert_true(info['chs'][372]['ch_name'] == 'EEG 058')
    assert_true(info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH)
    assert_true(info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH)
        assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V)
        assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG)
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert_true(info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH)
    assert_true(info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL)
    assert_true(info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO)

    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    assert_raises(ValueError, raw.set_channel_types, ch_types)
Example #36
def test_psd():
    """Tests the welch and multitaper PSD
    """
    raw = io.read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
    n_fft = 128

    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        psds, freqs = func(raw, proj=False, **kws)
        psds_proj, freqs_proj = func(raw, proj=True, **kws)

        assert_true(psds.shape == (len(kws['picks']), len(freqs)))
        assert_true(np.sum(freqs < 0) == 0)
        assert_true(np.sum(psds < 0) == 0)

        # Is power found where it should be
        ixs_max = np.argmax(psds, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj)
        # Array input shouldn't work
        assert_raises(ValueError, func, raw[:3, :20][0])

    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw,
                    events[:10],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks_psd,
                    proj=False,
                    preload=True,
                    baseline=None)
    evoked = epochs.average()

    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw,
                         events[:10],
                         event_id,
                         tmin_full,
                         tmax_full,
                         picks=picks_psd,
                         proj=False,
                         preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)

        psds, freqs = func(epochs[:1], proj=False, **kws)
        psds_proj, freqs_proj = func(epochs[:1], proj=True, **kws)
        psds_f, freqs_f = func(epochs_full[:1], proj=False, **kws)

        # this one will fail if, for example, you add 0.1 to tmin
        assert_array_almost_equal(psds, psds_f, 27)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj, 27)

        # Is power found where it should be
        ixs_max = np.argmax(psds.mean(0), axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)
        assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
        assert_true(np.sum(freqs < 0) == 0)
        assert_true(np.sum(psds < 0) == 0)

        # Array input shouldn't work
        assert_raises(ValueError, func, epochs.get_data())

        # Testing evoked (doesn't work w/ compute_epochs_psd)
        psds_ev, freqs_ev = func(evoked, proj=False, **kws)
        psds_ev_proj, freqs_ev_proj = func(evoked, proj=True, **kws)

        # Is power found where it should be
        ixs_max = np.argmax(psds_ev, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
        assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
Example #37
def test_basics():
    """Test annotation class."""
    raw = read_raw_fif(fif_fname)
    assert raw.annotations is not None  # XXX to be fixed in #5416
    assert len(raw.annotations.onset) == 0  # XXX to be fixed in #5416
    pytest.raises(IOError, read_annotations, fif_fname)
    onset = np.array(range(10))
    duration = np.ones(10)
    description = np.repeat('test', 10)
    dt = datetime.utcnow()
    meas_date = raw.info['meas_date']
    # Test time shifts.
    for orig_time in [None, dt, meas_date[0], meas_date]:
        annot = Annotations(onset, duration, description, orig_time)

    pytest.raises(ValueError, Annotations, onset, duration, description[:9])
    pytest.raises(ValueError, Annotations, [onset, 1], duration, description)
    pytest.raises(ValueError, Annotations, onset, [duration, 1], description)

    # Test combining annotations with concatenate_raws
    raw2 = raw.copy()
    delta = raw.times[-1] + 1. / raw.info['sfreq']
    orig_time = (meas_date[0] + meas_date[1] * 1e-6 + raw2._first_time)
    offset = orig_time - _handle_meas_date(raw2.info['meas_date'])
    annot = Annotations(onset, duration, description, orig_time)
    assert ' segments' in repr(annot)
    raw2.set_annotations(annot)
    assert_array_equal(raw2.annotations.onset, onset + offset)
    assert id(raw2.annotations) != id(annot)
    concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)

    assert_allclose(onset + offset + delta, raw.annotations.onset, rtol=1e-5)
    assert_array_equal(annot.duration, raw.annotations.duration)
    assert_array_equal(raw.annotations.description, np.repeat('test', 10))

    # Test combining with RawArray and orig_times
    data = np.random.randn(2, 1000) * 10e-12
    sfreq = 100.
    info = create_info(ch_names=['MEG1', 'MEG2'],
                       ch_types=['grad'] * 2,
                       sfreq=sfreq)
    info['meas_date'] = (np.pi, 0)
    raws = []
    for first_samp in [12300, 100, 12]:
        raw = RawArray(data.copy(), info, first_samp=first_samp)
        ants = Annotations([1., 2.], [.5, .5], 'x', np.pi + first_samp / sfreq)
        raw.set_annotations(ants)
        raws.append(raw)
    raw = RawArray(data.copy(), info)
    raw.set_annotations(Annotations([1.], [.5], 'x', None))
    raws.append(raw)
    raw = concatenate_raws(raws, verbose='debug')
    boundary_idx = np.where(raw.annotations.description == 'BAD boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    boundary_idx = np.where(raw.annotations.description == 'EDGE boundary')[0]
    assert len(boundary_idx) == 3
    raw.annotations.delete(boundary_idx)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 134., 135., 144., 145., 154.])
    raw.annotations.delete(2)
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154.])
    raw.annotations.append(5, 1.5, 'y')
    assert_array_equal(raw.annotations.onset,
                       [124., 125., 135., 144., 145., 154., 5.])
    assert_array_equal(raw.annotations.duration, [.5, .5, .5, .5, .5, .5, 1.5])
    assert_array_equal(raw.annotations.description,
                       ['x', 'x', 'x', 'x', 'x', 'x', 'y'])
Example #38
def test_events_from_annot_in_raw_objects():
    """Test basic functionality of events_fron_annot for raw objects."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Visual/Smiley': 32,
        'Motor/Button': 5
    }
    event_map = {v: k for k, v in event_id.items()}
    annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],
                        duration=np.zeros(len(events)),
                        description=[event_map[vv] for vv in events[:, 2]],
                        orig_time=None)
    raw.set_annotations(annot)

    events2, event_id2 = \
        events_from_annotations(raw, event_id=event_id, regexp=None)
    assert_array_equal(events, events2)
    assert_equal(event_id, event_id2)

    events3, event_id3 = \
        events_from_annotations(raw, event_id=None, regexp=None)

    assert_array_equal(events[:, 0], events3[:, 0])
    assert set(event_id.keys()) == set(event_id3.keys())

    first = np.unique(events3[:, 2])
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    first = np.unique(list(event_id3.values()))
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    events4, event_id4 =\
        events_from_annotations(raw, event_id=None, regexp='.*Left')

    expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id4.keys(), expected_event_id4.keys())

    expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events4[:, 0], events4[:, 0])

    events5, event_id5 = \
        events_from_annotations(raw, event_id=event_id, regexp='.*Left')

    expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id5, expected_event_id5)

    expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events5, events5)

    with pytest.raises(ValueError, match='not find any of the events'):
        events_from_annotations(raw, regexp='not_there')

    raw.set_annotations(None)
    events7, _ = events_from_annotations(raw)
    assert_array_equal(events7, np.empty((0, 3), dtype=int))
Example #39
def test_compute_proj_epochs(tmpdir):
    """Test SSP computation on epochs."""
    tempdir = str(tmpdir)
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                       exclude=[])
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
    for p_fname in [proj_fname, proj_gz_fname,
                    op.join(tempdir, 'test-proj.fif.gz')]:
        projs2 = read_proj(p_fname)

        assert len(projs) == len(projs2)

        for p1, p2 in zip(projs, projs2):
            assert p1['desc'] == p2['desc']
            assert p1['data']['col_names'] == p2['data']['col_names']
            assert p1['active'] == p2['active']
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                mask = np.ones(p1_data.size, dtype=bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)
            if p2['explained_var']:
                assert_array_almost_equal(p1['explained_var'],
                                          p2['explained_var'])

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert nproj == 2
    assert U.shape[1] == 2

    # test that you can save them
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo-ave.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert len(projs_evoked) == 2
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1,
                                desc_prefix='foobar')
    assert all('foobar' in x['desc'] for x in projs)
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)

    # test warnings on bad filenames
    proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-proj.fif'):
        write_proj(proj_badname, projs)
    with pytest.warns(RuntimeWarning, match='-proj.fif'):
        read_proj(proj_badname)

    # bad inputs
    fname = op.join(tempdir, 'out-proj.fif')
    with pytest.raises(TypeError, match='projs'):
        write_proj(fname, 'foo')
    with pytest.raises(TypeError, match=r'projs\[0\] must be .*'):
        write_proj(fname, ['foo'])
Example #40
def raw_orig():
    """Get raw data without any change to it from mne.io.tests.data."""
    raw = read_raw_fif(fname_raw_io, preload=True)
    return raw
Example #41
import os.path as op

from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif

from mayavi import mlab

print(__doc__)

###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
data_path = bst_phantom_elekta.data_path(verbose=True)

raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)

###############################################################################
# The data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:

events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG2421']

###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
# to save memory:
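# A minimal sketch of the plot referred to above (not part of the original
# snippet); Raw.plot_psd limits the PSD estimate to the first 60 s here.
raw.plot_psd(tmax=60.)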
Example #42
def test_chpi_subtraction_filter_chpi():
    """Test subtraction of cHPI signals."""
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
    raw.info['bads'] = ['MEG0111']
    raw.del_proj()
    raw_orig = raw.copy().crop(0, 16)
    with catch_logging() as log:
        filter_chpi(raw, include_line=False, t_window=0.2, verbose=True)
    log = log.getvalue()
    assert 'No average EEG' not in log
    assert '5 cHPI' in log
    # MaxFilter doesn't do quite as well as our algorithm with the last bit
    raw.crop(0, 16)
    # remove cHPI status chans
    raw_c = read_raw_fif(sss_hpisubt_fname).crop(0, 16).load_data()
    raw_c.pick_types(
        meg=True, eeg=True, eog=True, ecg=True, stim=True, misc=True)
    assert_meg_snr(raw, raw_c, 143, 624)
    # cHPI suppressed but not line freqs (or others)
    assert_suppressed(raw, raw_orig, np.arange(83, 324, 60), [30, 60, 150])
    raw = raw_orig.copy()
    with catch_logging() as log:
        filter_chpi(raw, include_line=True, t_window=0.2, verbose=True)
    log = log.getvalue()
    assert '5 cHPI' in log
    assert '6 line' in log
    # cHPI and line freqs suppressed
    suppressed = np.sort(np.concatenate([
        np.arange(83, 324, 60), np.arange(60, 301, 60),
    ]))
    assert_suppressed(raw, raw_orig, suppressed, [30, 150])

    # No HPI information
    raw = read_raw_fif(sample_fname, preload=True)
    raw_orig = raw.copy()
    assert raw.info['line_freq'] is None
    with pytest.raises(RuntimeError, match='line_freq.*consider setting it'):
        filter_chpi(raw, t_window=0.2)
    with raw.info._unlock():
        raw.info['line_freq'] = 60.
    with pytest.raises(ValueError, match='No appropriate cHPI information'):
        filter_chpi(raw, t_window=0.2)
    # but this is allowed
    with catch_logging() as log:
        filter_chpi(raw, t_window='auto', allow_line_only=True, verbose=True)
    log = log.getvalue()
    assert '0 cHPI' in log
    assert '1 line' in log
    # Our one line freq suppressed but not others
    assert_suppressed(raw, raw_orig, [60], [30, 45, 75])

    # When MaxFilter downsamples, like::
    #     $ maxfilter -nosss -ds 2 -f test_move_anon_raw.fif \
    #           -o test_move_anon_ds2_raw.fif
    # it can strip out some values of info, which we emulate here:
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
    raw = raw.crop(0, 1).load_data().resample(600., npad='auto')
    with raw.info._unlock():
        raw.info['lowpass'] = 200.
        del raw.info['maxshield']
        del raw.info['hpi_results'][0]['moments']
        del raw.info['hpi_subsystem']['event_channel']
    with catch_logging() as log:
        filter_chpi(raw, t_window='auto', verbose=True)
    with pytest.raises(ValueError, match='must be > 0'):
        filter_chpi(raw, t_window=-1)
    assert '2 cHPI' in log.getvalue()
Example #43
import numpy as np

import mne
from mne import io
from mne.stats import permutation_t_test
from mne.datasets import sample

print(__doc__)

# %%
# Set parameters
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5

#   Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)

# pick MEG Gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
                       exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data()
times = epochs.times

temporal_mask = np.logical_and(0.04 <= times, times <= 0.06)
data = np.mean(data[:, :, temporal_mask], axis=2)

n_permutations = 50000
T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=1)
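# A possible follow-up (not in the original snippet): keep only the channels
# whose permutation p-value clears an alpha of 0.05.
significant_sensors = picks[p_values <= 0.05]
print("Number of significant sensors : %d" % len(significant_sensors))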
Example #44
def test_add_reference():
    """Test adding a reference."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag FIF files, the reference electrode location is stored in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)
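    # (Illustrative note, not from the original test: each info['chs'][i]['loc']
    # is a 12-element array; for EEG channels loc[:3] holds the electrode
    # position and loc[3:6] the reference position, which is what the new
    # 'Ref' channel's own position is compared against above.)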

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    with warnings.catch_warnings(record=True):  # multiple channels set to zero
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    with warnings.catch_warnings(record=True):  # multiple channels set to zero
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw_np = read_raw_fif(fif_fname, preload=False)
    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
    assert_raises(ValueError, add_reference_channels, raw, 1)
Example #45
def test_set_eeg_reference():
    """Test rereference eeg data."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.info['projs'] = []

    # Test setting an average reference projection
    assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
    reref, ref_data = set_eeg_reference(raw, projection=True)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_true(not reref.info['projs'][0]['active'])
    assert_true(ref_data is None)
    reref.apply_proj()
    eeg_chans = [
        raw.ch_names[ch] for ch in pick_types(raw.info, meg=False, eeg=True)
    ]
    _test_reference(raw, reref, ref_data,
                    [ch for ch in eeg_chans if ch not in raw.info['bads']])

    # Test setting an average reference when one was already present
    with warnings.catch_warnings(record=True):
        reref, ref_data = set_eeg_reference(raw, copy=False, projection=True)
    assert_true(ref_data is None)

    # Test setting an average reference on non-preloaded data
    raw_nopreload = read_raw_fif(fif_fname, preload=False)
    raw_nopreload.info['projs'] = []
    reref, ref_data = set_eeg_reference(raw_nopreload, projection=True)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_true(not reref.info['projs'][0]['active'])

    # Rereference raw data by creating a copy of original data
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test that data is modified in place when copy=False
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                        copy=False)
    assert_true(raw is reref)

    # Test moving from custom to average reference
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
    reref, _ = set_eeg_reference(reref, projection=True)
    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
    assert_equal(reref.info['custom_ref_applied'], False)

    # When creating an average reference fails, make sure the
    # custom_ref_applied flag remains untouched.
    reref = raw.copy()
    reref.info['custom_ref_applied'] = True
    reref.pick_types(eeg=False)  # Cause making average ref fail
    assert_raises(ValueError, set_eeg_reference, reref, projection=True)
    assert_true(reref.info['custom_ref_applied'])

    # Test moving from average to custom reference
    reref, ref_data = set_eeg_reference(raw, projection=True)
    reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])
    assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
    assert_equal(reref.info['custom_ref_applied'], True)

    # Test that disabling the reference does not change anything
    reref, ref_data = set_eeg_reference(raw, [])
    assert_array_equal(raw._data, reref._data)

    # Test that average reference gives identical results when calculated
    # via SSP projection (projection=True) or directly (projection=False)
    raw.info['projs'] = []
    reref_1, _ = set_eeg_reference(raw.copy(), projection=True)
    reref_1.apply_proj()
    reref_2, _ = set_eeg_reference(raw.copy(), projection=False)
    assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15)

    # Test average reference without projection
    reref, ref_data = set_eeg_reference(raw.copy(),
                                        ref_channels="average",
                                        projection=False)
    _test_reference(raw, reref, ref_data, eeg_chans)

    # projection=True only works for ref_channels='average'
    assert_raises(ValueError, set_eeg_reference, raw, [], True, True)
    assert_raises(ValueError, set_eeg_reference, raw, ['EEG 001'], True, True)
Example #46
def test_apply_reference():
    """Test base function for rereferencing."""
    raw = read_raw_fif(fif_fname, preload=True)

    # Rereference raw data by creating a copy of original data
    reref, ref_data = _apply_reference(raw.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # The CAR reference projection should have been removed by the function
    assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that data is modified in place when copy=False
    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
    assert_true(raw is reref)

    # Test that disabling the reference does not change anything
    reref, ref_data = _apply_reference(raw.copy(), [])
    assert_array_equal(raw._data, reref._data)

    # Test re-referencing Epochs object
    raw = read_raw_fif(fif_fname, preload=False)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    reref, ref_data = _apply_reference(epochs.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test re-referencing Evoked object
    evoked = epochs.average()
    reref, ref_data = _apply_reference(evoked.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert_true(reref.info['custom_ref_applied'])
    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Referencing needs data to be preloaded
    raw_np = read_raw_fif(fif_fname, preload=False)
    assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])

    # Test having inactive SSP projections that deal with channels involved
    # during re-referencing
    raw = read_raw_fif(fif_fname, preload=True)
    raw.add_proj(
        Projection(
            active=False,
            data=dict(col_names=['EEG 001', 'EEG 002'],
                      row_names=None,
                      data=np.array([[1, 1]]),
                      ncol=2,
                      nrow=1),
            desc='test',
            kind=1,
        ))
    # Projection concerns channels mentioned in projector
    assert_raises(RuntimeError, _apply_reference, raw, ['EEG 001'])

    # Projection does not concern channels mentioned in projector, no error
    _apply_reference(raw, ['EEG 003'], ['EEG 004'])
Example #47
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd

print(__doc__)

# %%
# Set parameters
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_raw.fif'
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = meg_path / 'labels' / 'Aud-lh.label'

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']

# picks MEG gradiometers
picks = mne.pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       eog=True,
                       stim=False,
                       exclude='bads')

tmin, tmax = 0, 120  # use the first 120s of data
fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
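# A hedged sketch of how these parameters might feed compute_source_psd (not
# part of the original snippet; lambda2 and the label choice are assumptions
# in the spirit of the standard sample-data tutorial).
lambda2 = 1. / 9.  # regularization, 1 / SNR ** 2 with an assumed SNR of 3
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=lambda2, method="dSPM",
                         tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                         n_fft=n_fft, label=label)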
Example #48
def test_set_bipolar_reference():
    """Test bipolar referencing."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.apply_proj()

    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar', {
        'kind': FIFF.FIFFV_EOG_CH,
        'extra': 'some extra value'
    })
    assert_true(reref.info['custom_ref_applied'])

    # Compare result to a manual calculation
    a = raw.copy().pick_channels(['EEG 001', 'EEG 002'])
    a = a._data[0, :] - a._data[1, :]
    b = reref.copy().pick_channels(['bipolar'])._data[0, :]
    assert_allclose(a, b)

    # Original channels should be replaced by a virtual one
    assert_true('EEG 001' not in reref.ch_names)
    assert_true('EEG 002' not in reref.ch_names)
    assert_true('bipolar' in reref.ch_names)

    # Check channel information
    bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
    an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
    for key in bp_info:
        if key == 'loc':
            assert_array_equal(bp_info[key], 0)
        elif key == 'coil_type':
            assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
        elif key == 'kind':
            assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
        else:
            assert_equal(bp_info[key], an_info[key])
    assert_equal(bp_info['extra'], 'some extra value')

    # Minimalist call
    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
    assert_true('EEG 001-EEG 002' in reref.ch_names)

    # Minimalist call with twice the same anode
    reref = set_bipolar_reference(raw, ['EEG 001', 'EEG 001', 'EEG 002'],
                                  ['EEG 002', 'EEG 003', 'EEG 003'])
    assert_true('EEG 001-EEG 002' in reref.ch_names)
    assert_true('EEG 001-EEG 003' in reref.ch_names)

    # Set multiple references at once
    reref = set_bipolar_reference(
        raw,
        ['EEG 001', 'EEG 003'],
        ['EEG 002', 'EEG 004'],
        ['bipolar1', 'bipolar2'],
        [{
            'kind': FIFF.FIFFV_EOG_CH,
            'extra': 'some extra value'
        }, {
            'kind': FIFF.FIFFV_EOG_CH,
            'extra': 'some extra value'
        }],
    )
    a = raw.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])
    a = np.array(
        [a._data[0, :] - a._data[1, :], a._data[2, :] - a._data[3, :]])
    b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data
    assert_allclose(a, b)

    # Test creating a bipolar reference that doesn't involve EEG channels:
    # it should not set the custom_ref_applied flag
    reref = set_bipolar_reference(raw,
                                  'MEG 0111',
                                  'MEG 0112',
                                  ch_info={'kind': FIFF.FIFFV_MEG_CH})
    assert_true(not reref.info['custom_ref_applied'])
    assert_true('MEG 0111-MEG 0112' in reref.ch_names)

    # Test a battery of invalid inputs
    assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001',
                  ['EEG 002', 'EEG 003'], 'bipolar')
    assert_raises(ValueError, set_bipolar_reference, raw,
                  ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
    assert_raises(ValueError, set_bipolar_reference, raw, 'EEG 001', 'EEG 002',
                  ['bipolar1', 'bipolar2'])
    assert_raises(ValueError,
                  set_bipolar_reference,
                  raw,
                  'EEG 001',
                  'EEG 002',
                  'bipolar',
                  ch_info=[{
                      'foo': 'bar'
                  }, {
                      'foo': 'bar'
                  }])
    assert_raises(ValueError,
                  set_bipolar_reference,
                  raw,
                  'EEG 001',
                  'EEG 002',
                  ch_name='EEG 003')
Example #49
def test_ica_core():
    """Test ICA on raw and epochs."""
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    # XXX. The None cases helped reveal bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # add more cases here for future debugging
    max_pca_components = [3]
    picks_ = [picks]
    methods = ['fastica']
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # test init catchers
    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method, max_iter=1)
        assert_raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        assert_raises(RuntimeError, ica.get_sources, raw)
        assert_raises(RuntimeError, ica.get_sources, epochs)

        # test decomposition
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
            repr(ica)  # to test repr
        assert_true('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with warnings.catch_warnings(record=True):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        raw_sources = ica.get_sources(raw)
        # test for #3804
        assert_equal(raw_sources._filenames, [None])
        print(raw_sources)

        sources = raw_sources[:, :][0]
        assert_true(sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        assert_raises(ValueError, ica.apply, raw3,
                      include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0)
        with warnings.catch_warnings(record=True):
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert_true(sources.shape[1] == ica.n_components_)

        assert_raises(ValueError, ica.score_sources, epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        assert_raises(ValueError, ica.apply, epochs3,
                      include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica._pre_whitener.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica._pre_whitener)

    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    assert_raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    assert_raises(ValueError, ica.get_sources, offender)
    assert_raises(ValueError, ica.fit, offender)
    assert_raises(ValueError, ica.apply, offender)
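

# A minimal sketch of the fit / inspect / apply workflow the test above
# exercises; the helper name is illustrative, and `raw` is assumed to be a
# preloaded, high-pass-filtered Raw instance.
def _demo_ica_workflow(raw):
    from mne.preprocessing import ICA
    ica = ICA(n_components=15, random_state=0)
    ica.fit(raw)                    # decompose the data
    sources = ica.get_sources(raw)  # component time courses as a Raw object
    ica.exclude = [0]               # components to remove (illustrative)
    return ica.apply(raw.copy()), sources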
예제 #50
0
def _get_raw(preload=False):
    """Get raw data."""
    return read_raw_fif(raw_fname, preload=preload)


def test_kit2fiff_model():
    """Test Kit2Fiff model."""
    from mne.gui._kit2fiff_gui import Kit2FiffModel
    tempdir = _TempDir()
    tgt_fname = os.path.join(tempdir, 'test-raw.fif')

    model = Kit2FiffModel()
    assert not model.can_save
    assert model.misc_chs_desc == "No SQD file selected..."
    assert model.stim_chs_comment == ""
    model.markers.mrk1.file = mrk_pre_path
    model.markers.mrk2.file = mrk_post_path
    model.sqd_file = sqd_path
    assert model.misc_chs_desc == "160:192"
    model.hsp_file = hsp_path
    assert not model.can_save
    model.fid_file = fid_path
    assert model.can_save

    # events
    model.stim_slope = '+'
    assert model.get_event_info() == {1: 2}
    model.stim_slope = '-'
    assert model.get_event_info() == {254: 2, 255: 2}

    # stim channels
    model.stim_chs = "181:184, 186"
    assert_array_equal(model.stim_chs_array, [181, 182, 183, 186])
    assert model.stim_chs_ok
    assert model.get_event_info() == {}
    model.stim_chs = "181:184, bad"
    assert not model.stim_chs_ok
    assert not model.can_save
    model.stim_chs = ""
    assert model.can_save

    # export raw
    raw_out = model.get_raw()
    raw_out.save(tgt_fname)
    raw = read_raw_fif(tgt_fname)

    # Compare exported raw with the original binary conversion
    raw_bin = read_raw_fif(fif_path)
    trans_bin = raw.info['dev_head_t']['trans']
    want_keys = list(raw_bin.info.keys())
    assert sorted(want_keys) == sorted(list(raw.info.keys()))
    trans_transform = raw_bin.info['dev_head_t']['trans']
    assert_allclose(trans_transform, trans_bin, 0.1)

    # Averaging markers
    model.markers.mrk3.method = "Average"
    trans_avg = model.dev_head_trans
    assert not np.all(trans_avg == trans_transform)
    assert_allclose(trans_avg, trans_bin, 0.1)

    # Test exclusion of one marker
    model.markers.mrk3.method = "Transform"
    model.use_mrk = [1, 2, 3, 4]
    assert not np.all(model.dev_head_trans == trans_transform)
    assert not np.all(model.dev_head_trans == trans_avg)
    assert not np.all(model.dev_head_trans == np.eye(4))

    # test setting stim channels
    model.stim_slope = '+'
    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')

    model.stim_coding = '<'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)

    events_rev = events_bin.copy()
    events_rev[:, 2] = 1
    model.stim_coding = '>'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_rev)

    model.stim_coding = 'channel'
    model.stim_chs = "160:161"
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin + [0, 0, 32])

    # test reset
    model.clear_all()
    assert model.use_mrk == [0, 1, 2, 3, 4]
    assert model.sqd_file == ""
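

# A minimal sketch of the scripted counterpart of the GUI model above; the
# helper name and the sqd/mrk/elp/hsp paths are assumptions, so substitute
# your own KIT recordings.
def _demo_kit_to_fif(sqd, mrk, elp, hsp, out_fname='kit-raw.fif'):
    import mne
    raw = mne.io.read_raw_kit(sqd, mrk=mrk, elp=elp, hsp=hsp,
                              stim='>', slope='-')
    raw.save(out_fname, overwrite=True)
    return raw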
예제 #52
0
def test_ica_additional():
    """Test additional ICA functionality."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0])

    corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw_, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert_true(np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                        ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, l_trans_bandwidth='auto',
                       h_trans_bandwidth='auto', filter_length='auto',
                       phase='zero', fir_window='hamming')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], filter_length='auto', trans_bandwidth=10,
                             phase='zero', fir_window='hamming')
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     '_pre_whitener']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

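        # 202 == FIFF.FIFFV_EOG_CH: relabel the preceding channel as a second
        # EOG channel so find_bads_eog returns one score array per EOG channel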
        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

        idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_ecg(evoked, method='correlation')
        assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)

    ica = ICA()
    ica.fit(raw, picks=picks[:5])
    ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    assert_raises(RuntimeError, ica.find_bads_eog, raw)
    assert_raises(RuntimeError, ica.find_bads_ecg, raw)
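

# A minimal sketch of the save / read round-trip exercised above; the helper
# name and file name are illustrative, and `ica` is assumed to be an already
# fitted ICA instance. Note the '-ica.fif' suffix that assert_naming expects.
def _demo_ica_io(ica, fname='demo-ica.fif'):
    from mne.preprocessing import read_ica
    ica.save(fname)
    ica_read = read_ica(fname)
    assert ica_read.n_components_ == ica.n_components_
    return ica_read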
예제 #53
0
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset,
                            duration,
                            description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    # Get rid of the boundary annotations
    raw_concat.annotations.delete(-1)
    raw_concat.annotations.delete(-1)
    # Ensure annotations survive the crop -> concat round-trip
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr, ))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration', 'orig_time'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
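

# A minimal sketch of the basic Annotations round-trip exercised above; the
# helper name and file name are illustrative, `raw` is assumed to be any Raw
# instance, and onsets are in seconds from the start of the data.
def _demo_annotations_io(raw, fname='demo-annot.fif'):
    import mne
    annot = mne.Annotations(onset=[1.0, 3.5], duration=[0.5, 0.5],
                            description=['bad blink', 'bad blink'])
    raw.set_annotations(annot)
    raw.annotations.save(fname)  # '-annot.fif' suffix avoids a naming warning
    return mne.read_annotations(fname)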
예제 #54
0
def test_cov_rank_estimation(rank_method, proj, meg):
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert cov['eig'][0] <= 1e-25  # avg projector should set this to zero
    assert (cov['eig'][1:] > 1e-16).all()  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)
    assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])

    raw_sss = read_raw_fif(hp_fif_fname)
    assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
    raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)],
    ))

    for (cov, picks_list, iter_info), scalings in iter_tests:
        rank = compute_rank(cov, rank_method, scalings, iter_info,
                            proj=proj)
        rank['all'] = sum(rank.values())
        for ch_type, picks in picks_list:

            this_info = pick_info(iter_info, picks)

            # compute subset of projs, active and inactive
            n_projs_applied = sum(proj['active'] and
                                  len(set(proj['data']['col_names']) &
                                      set(this_info['ch_names'])) > 0
                                  for proj in cov['projs'])
            n_projs_info = sum(len(set(proj['data']['col_names']) &
                                   set(this_info['ch_names'])) > 0
                               for proj in this_info['projs'])

            # count channel types
            ch_types = [channel_type(this_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            has_sss = (n_meg > 0 and len(this_info['proc_history']) > 0)
            if has_sss:
                n_meg = _get_rank_sss(this_info)

            expected_rank = n_meg + n_eeg
            if rank_method is None:
                if meg == 'combined' or not has_sss:
                    if proj:
                        expected_rank -= n_projs_info
                    else:
                        expected_rank -= n_projs_applied
            else:
                # XXX for now it just uses the total count
                assert rank_method == 'info'
                if proj:
                    expected_rank -= n_projs_info

            assert rank[ch_type] == expected_rank
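

# A minimal sketch of the two rank estimates the loop above compares; the
# helper name is illustrative, and `raw` is assumed to be a Raw instance
# with its projectors already attached.
def _demo_compute_rank(raw):
    import mne
    cov = mne.compute_raw_covariance(raw)
    # rank='info' trusts the measurement info (projs, Maxwell filtering);
    # rank=None estimates the rank numerically from the data.
    return (mne.compute_rank(cov, rank='info', info=raw.info),
            mne.compute_rank(cov, rank=None, info=raw.info))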
예제 #55
0
def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg, tol_kind, tol):
    """Test maxfilter rank lookup."""
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types(meg=True)
    assert raw.info['projs'] == []
    mf = raw.info['proc_history'][0]['max_info']
    assert mf['sss_info']['nfree'] == rank_orig

    assert compute_rank(raw, 'info')['meg'] == rank_orig
    assert compute_rank(raw.copy().pick('grad'), 'info')['grad'] == rank_orig
    assert compute_rank(raw.copy().pick('mag'), 'info')['mag'] == rank_orig

    mult = 1 + (meg == 'separate')
    rank = rank_orig - mult * n_proj
    if n_proj > 0:
        # Let's do some projection
        raw.add_proj(compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj,
                                      meg=meg, verbose=True))
    raw.apply_proj()
    data_orig = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a'):
        _estimate_rank_raw(raw, tol=None)

    allowed_rank = [rank_orig if meg == 'separate' else rank]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        allowed_rank.append(allowed_rank[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    rank_new = _estimate_rank_raw(raw, tol_kind=tol_kind)
    assert rank_new in allowed_rank

    rank_new = _estimate_rank_raw(
        raw, tol=tol, tol_kind=tol_kind)
    if fname == mf_fif_fname and tol_kind == 'relative' and tol != 'auto':
        pass  # does not play nicely with row norms of _estimate_rank_raw
    else:
        assert rank_new in allowed_rank
    rank_new = _estimate_rank_raw(
        raw, scalings=dict(), tol=tol, tol_kind=tol_kind)
    assert rank_new in allowed_rank
    scalings = dict(grad=1e13, mag=1e15)
    rank_new = _compute_rank_int(
        raw, None, scalings=scalings, tol=tol, tol_kind=tol_kind,
        verbose='debug')
    assert rank_new in allowed_rank
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        allowed_rank.append(allowed_rank[0] - 2)
    rank_new = _compute_rank_int(
        raw, None, tol=tol, tol_kind=tol_kind, verbose='debug')
    assert rank_new in allowed_rank
    del allowed_rank

    rank_new = _compute_rank_int(raw, 'info')
    assert rank_new == rank
    assert_array_equal(raw[:][0], data_orig)
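

# A minimal sketch of the bookkeeping above (rank_orig - mult * n_proj): each
# applied projector removes one degree of freedom from the channels it spans.
# The helper name is illustrative; `raw` is assumed to be an MEG-only,
# Maxwell-filtered Raw instance so the rank dictionary has a 'meg' entry.
def _demo_rank_after_proj(raw):
    import mne
    before = mne.compute_rank(raw, rank='info')
    raw = raw.copy().add_proj(
        mne.compute_proj_raw(raw, n_grad=1, n_mag=1)).apply_proj()
    after = mne.compute_rank(raw, rank='info')
    return before, after  # 'meg' drops by the number of applied projectors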
예제 #56
0
import mne
from mne import io
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()

###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir')  # replace baselining with high-pass
events = mne.read_events(event_fname)

raw.info['bads'] = ['MEG 2443']  # set bad channels
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       eog=False, exclude='bads')

# Read epochs
epochs = mne.Epochs(raw,
                    events,
                    event_id,
예제 #57
0
def test_scaler(info, method):
    """Test methods of Scaler."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]

    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    y = epochs.events[:, -1]

    epochs_data_t = epochs_data.transpose([1, 0, 2])
    if method in ('mean', 'median'):
        if not check_version('sklearn'):
            with pytest.raises(ImportError, match='No module'):
                Scaler(info, method)
            return

    if info:
        info = epochs.info
    scaler = Scaler(info, method)
    X = scaler.fit_transform(epochs_data, y)
    assert_equal(X.shape, epochs_data.shape)
    if method is None or isinstance(method, dict):
        sd = DEFAULTS['scalings'] if method is None else method
        stds = np.zeros(len(picks))
        for key in ('mag', 'grad'):
            stds[pick_types(epochs.info, meg=key)] = 1. / sd[key]
        stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg']
        means = np.zeros(len(epochs.ch_names))
    elif method == 'mean':
        stds = np.array([np.std(ch_data) for ch_data in epochs_data_t])
        means = np.array([np.mean(ch_data) for ch_data in epochs_data_t])
    else:  # median
        percs = np.array([
            np.percentile(ch_data, [25, 50, 75]) for ch_data in epochs_data_t
        ])
        stds = percs[:, 2] - percs[:, 0]
        means = percs[:, 1]
    assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis],
                    epochs_data,
                    rtol=1e-12,
                    atol=1e-20,
                    err_msg=method)

    X2 = scaler.fit(epochs_data, y).transform(epochs_data)
    assert_array_equal(X, X2)

    # inverse_transform
    Xi = scaler.inverse_transform(X)
    assert_array_almost_equal(epochs_data, Xi)

    # Test init exception
    pytest.raises(ValueError, Scaler, None, None)
    pytest.raises(TypeError, scaler.fit, epochs, y)
    pytest.raises(TypeError, scaler.transform, epochs)
    epochs_bad = Epochs(raw, events, event_id, 0, 0.01, baseline=None,
                        picks=np.arange(len(raw.ch_names)))  # non-data chs
    scaler = Scaler(epochs_bad.info, None)
    pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(), y)
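

# A minimal sketch of Scaler as the first step of a decoding pipeline; the
# helper name is illustrative, scikit-learn is assumed to be installed, and
# `epochs` is assumed to be an existing Epochs object whose events carry the
# class labels in the last column.
def _demo_scaler_pipeline(epochs):
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import LogisticRegression
    from mne.decoding import Scaler, Vectorizer
    X = epochs.get_data()            # (n_epochs, n_channels, n_times)
    y = epochs.events[:, -1]
    clf = make_pipeline(Scaler(epochs.info), Vectorizer(),
                        LogisticRegression(solver='liblinear'))
    return clf.fit(X, y)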
예제 #58
0
def test_compute_tfr():
    """Test _compute_tfr function."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       include=[], exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    sfreq = epochs.info['sfreq']
    freqs = np.arange(10, 20, 3).astype(float)

    # Check all combination of options
    for func, use_fft, zero_mean, output in product(
        (tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
        ('complex', 'power', 'phase', 'avg_power_itc', 'avg_power', 'itc')):
        # Check exception
        if (func == tfr_array_multitaper) and (output == 'phase'):
            pytest.raises(NotImplementedError, func, data, sfreq=sfreq,
                          freqs=freqs, output=output)
            continue

        # Check runs
        out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,
                   zero_mean=zero_mean, n_cycles=2., output=output)
        # Check shapes
        shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
        if ('avg' in output) or ('itc' in output):
            assert_array_equal(shape[1:], out.shape)
        else:
            assert_array_equal(shape, out.shape)

        # Check types
        if output in ('complex', 'avg_power_itc'):
            assert_equal(np.complex128, out.dtype)
        else:
            assert_equal(np.float64, out.dtype)
        assert (np.all(np.isfinite(out)))

    # Check errors params
    for _data in (None, 'foo', data[0]):
        pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)
    for _freqs in (None, 'foo', [[0]]):
        pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)
    for _sfreq in (None, 'foo'):
        pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)
    for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
        for value in (None, 'foo'):
            kwargs = {key: value}  # FIXME pep8
            pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
                          **kwargs)
    with pytest.raises(ValueError, match='above Nyquist'):
        _compute_tfr(data, [sfreq], sfreq)

    # No time_bandwidth parameter for the morlet method
    pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
                  method='morlet', time_bandwidth=1)
    # 'phase' output is not implemented for multitaper  XXX check?
    pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
                  method='multitaper', output='phase')

    # Inter-trial coherence tests
    out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
    assert np.sum(out >= 1) == 0
    assert np.sum(out <= 0) == 0

    # Check decim shapes
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
        _decim = slice(None, None, decim) if isinstance(decim, int) else decim
        n_time = len(np.arange(data.shape[2])[_decim])
        shape = np.r_[data.shape[:2], len(freqs), n_time]
        for method in ('multitaper', 'morlet'):
            # Single trials
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, n_cycles=2.)
            assert_array_equal(shape, out.shape)
            # Averages
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, output='avg_power',
                               n_cycles=2.)
            assert_array_equal(shape[1:], out.shape)
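

# A minimal sketch of calling one of the array-level TFR functions directly,
# as the parametrised loop above does; the helper name is illustrative, and
# `epochs` is assumed to be an existing Epochs object.
def _demo_tfr_array(epochs):
    import numpy as np
    from mne.time_frequency import tfr_array_morlet
    data = epochs.get_data()         # (n_epochs, n_channels, n_times)
    freqs = np.arange(10, 20, 3).astype(float)
    power = tfr_array_morlet(data, sfreq=epochs.info['sfreq'], freqs=freqs,
                             n_cycles=2., output='avg_power', decim=2)
    return power                     # (n_channels, n_freqs, n_decimated_times)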
# Setup a surface-based source space (oct5 is coarse, just used here
# to speed up this example; we recommend oct6 in actual analyses)
src = setup_source_space(subject, subjects_dir=subjects_dir,
                         spacing='oct5', add_dist=False)

# Setup a volume source space
# set pos=10.0 for speed, not very accurate; we recommend something smaller
# like 5.0 in actual analyses:
vol_src = setup_volume_source_space(
    subject, mri=fname_aseg, pos=10.0, bem=fname_model,
    add_interpolator=False,  # just for speed, usually use True
    volume_label=labels_vol, subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src

# Load data
raw = read_raw_fif(fname_raw, preload=True)
events = mne.find_events(raw)
raw.pick_types(meg=True, eeg=False, eog=True)
noise_cov = mne.read_cov(fname_cov)

# compute the fwd matrix
fwd = make_forward_solution(raw.info, fname_trans, src, fname_bem,
                            mindist=5.0)  # ignore sources<=5mm from innerskull
del src

# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    reject=reject, preload=False)
del raw
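
# A possible continuation, sketched here with illustrative parameters only:
# compute an inverse operator on the mixed source space and apply it to the
# average evoked response.
from mne.minimum_norm import make_inverse_operator, apply_inverse

evoked = epochs.average()
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
                                         depth=None, fixed=False)
stc = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
                    method='dSPM')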
예제 #60
0
    os.mkdir(os.path.join(output_path))

# files to be analysed

files = sorted(glob.glob(os.path.join(data_path, '*-raw.fif')))

# === LOOP THROUGH FILES AND EXTRACT EPOCHS =========================
for file in files:
    # --- 1) set up paths and file names -----------------------
    filepath, filename = os.path.split(file)
    # subject in question
    subj = re.findall(r'\d+', filename)[0]

    # --- 2) Read in the data ----------------------------------

    raw = read_raw_fif(file, preload=True)

    # sampling freq
    sfreq = raw.info['sfreq']

    # --- 3) RECODE EVENTS -----------------------------------------
    #  Get events
    evs = mne.find_events(raw,
                          stim_channel='Status',
                          output='onset',
                          min_duration=0.002)
    # Copy of events
    new_evs = evs.copy()
    broken = []
    trial = 0
    rt = np.zeros((1248, 4))