Example no. 1
1
def test_saving_picked():
    """Test saving picked CTF instances."""
    temp_dir = _TempDir()
    out_fname = op.join(temp_dir, 'test_py_raw.fif')
    raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial))
    raw.crop(0, 1).load_data()
    assert raw.compensation_grade == get_current_comp(raw.info) == 0
    assert len(raw.info['comps']) == 5
    pick_kwargs = dict(meg=True, ref_meg=False, verbose=True)
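    # picking MEG without ref channels should drop the compensation matrices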
    for comp_grade in [0, 1]:
        raw.apply_gradient_compensation(comp_grade)
        with catch_logging() as log:
            raw_pick = raw.copy().pick_types(**pick_kwargs)
        assert len(raw.info['comps']) == 5
        assert len(raw_pick.info['comps']) == 0
        log = log.getvalue()
        assert 'Removing 5 compensators' in log
        raw_pick.save(out_fname, overwrite=True)  # should work
        raw2 = read_raw_fif(out_fname)
        assert (raw_pick.ch_names == raw2.ch_names)
        assert_array_equal(raw_pick.times, raw2.times)
        assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
                        atol=1e-20)  # atol is very small but > 0

        raw2 = read_raw_fif(out_fname, preload=True)
        assert (raw_pick.ch_names == raw2.ch_names)
        assert_array_equal(raw_pick.times, raw2.times)
        assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
                        atol=1e-20)  # atol is very small but > 0
Example no. 2
0
def test_split_files():
    """Test writing and reading of split raw files
    """
    tempdir = _TempDir()
    raw_1 = Raw(fif_fname, preload=True)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # buf size
    split_fname = op.join(tempdir, 'split_raw.fif')
    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')

    raw_2 = Raw(split_fname)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # buf size
    data_1, times_1 = raw_1[:, :]
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)

    # test the case where the silly user specifies the split files
    fnames = [split_fname]
    fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        raw_2 = Raw(fnames)
    data_2, times_2 = raw_2[:, :]
    assert_array_equal(data_1, data_2)
    assert_array_equal(times_1, times_2)
def test_io_inverse_operator():
    """Test IO of inverse_operator
    """
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
Example no. 4
0
def test_source_space_from_label():
    """Test generating a source space from volume label."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_label = label_names[int(np.random.rand() * len(label_names))]

    # Test pos as dict
    pos = dict()
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
                  volume_label=volume_label, mri=aseg_fname)

    # Test no mri provided
    pytest.raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
                  volume_label=volume_label)

    # Test invalid volume label
    pytest.raises(ValueError, setup_volume_source_space, 'sample',
                  volume_label='Hello World!', mri=aseg_fname)

    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_label, mri=aseg_fname,
                                    add_interpolator=False)
    assert_equal(volume_label, src[0]['seg_name'])

    # test reading and writing
    out_name = op.join(tempdir, 'temp-src.fif')
    write_source_spaces(out_name, src)
    src_from_file = read_source_spaces(out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
Example no. 5
0
def test_preload_modify():
    """Test preloading and modifying data
    """
    tempdir = _TempDir()
    for preload in [False, True, 'memmap.dat']:
        raw = Raw(fif_fname, preload=preload)

        nsamp = raw.last_samp - raw.first_samp + 1
        picks = pick_types(raw.info, meg='grad', exclude='bads')

        data = rng.randn(len(picks), nsamp // 2)

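        # in-place assignment requires preloaded data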
        try:
            raw[picks, :nsamp // 2] = data
        except RuntimeError as err:
            if not preload:
                continue
            else:
                raise err

        tmp_fname = op.join(tempdir, 'raw.fif')
        raw.save(tmp_fname, overwrite=True)

        raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]

        assert_allclose(data, data_new)
Example no. 6
0
def test_add_source_space_distances_limited():
    """Test adding distances to source space with a dist_limit."""
    tempdir = _TempDir()
    src = read_source_spaces(fname)
    src_new = read_source_spaces(fname)
    del src_new[0]['dist']
    del src_new[1]['dist']
    n_do = 200  # limit this for speed
    src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
    src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
    out_name = op.join(tempdir, 'temp-src.fif')
    try:
        add_source_space_distances(src_new, dist_limit=0.007)
    except RuntimeError:  # what we throw when scipy version is wrong
        raise SkipTest('dist_limit requires scipy > 0.13')
    write_source_spaces(out_name, src_new)
    src_new = read_source_spaces(out_name)

    for so, sn in zip(src, src_new):
        assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
        assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
        do = so['dist']
        dn = sn['dist']

        # clean out distances > 0.007 in C code
        do.data[do.data > 0.007] = 0
        do.eliminate_zeros()

        # make sure we have some comparable distances
        assert np.sum(do.data < 0.007) > 400

        # do comparison over the region computed
        d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
        assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
Example no. 7
0
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Example no. 8
0
def test_watershed_bem():
    """Test mne watershed bem."""
    check_usage(mne_watershed_bem)
    # Copy necessary files to tempdir
    tempdir = _TempDir()
    mridata_path = op.join(subjects_dir, 'sample', 'mri')
    subject_path_new = op.join(tempdir, 'sample')
    mridata_path_new = op.join(subject_path_new, 'mri')
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(mridata_path_new)
    if op.exists(op.join(mridata_path, 'T1')):
        shutil.copytree(op.join(mridata_path, 'T1'), op.join(mridata_path_new,
                                                             'T1'))
    if op.exists(op.join(mridata_path, 'T1.mgz')):
        shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
                        op.join(mridata_path_new, 'T1.mgz'))
    out_fnames = list()
    for kind in ('outer_skin', 'outer_skull', 'inner_skull'):
        out_fnames.append(op.join(subject_path_new, 'bem', kind + '.surf'))
    assert not any(op.isfile(out_fname) for out_fname in out_fnames)
    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-o'),
                    disable_stdout=False, disable_stderr=False):
        mne_watershed_bem.run()
    for out_fname in out_fnames:
        _, tris = read_surface(out_fname)
        assert len(tris) == 20480
Example no. 9
0
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # the src in the testing dataset was created with the BEM as bounds
    for bem, surf in zip((fname_bem, None), (None, surf)):
        src_new = setup_volume_source_space(
            'sample', pos=7.0, bem=bem, surface=surf, mri='T1.mgz',
            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    pytest.raises(IOError, setup_volume_source_space, 'sample',
                  pos=7.0, bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
                               relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10)
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    pytest.raises(ValueError, setup_volume_source_space, sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    pytest.raises(ValueError, setup_volume_source_space, sphere=sphere)
Example no. 10
0
def test_flash_bem():
    """Test mne flash_bem."""
    check_usage(mne_flash_bem, force_help=True)
    # Using the sample dataset
    subjects_dir = op.join(sample.data_path(download=False), 'subjects')
    # Copy necessary files to tempdir
    tempdir = _TempDir()
    mridata_path = op.join(subjects_dir, 'sample', 'mri')
    subject_path_new = op.join(tempdir, 'sample')
    mridata_path_new = op.join(subject_path_new, 'mri')
    os.makedirs(op.join(mridata_path_new, 'flash'))
    os.makedirs(op.join(subject_path_new, 'bem'))
    shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
                    op.join(mridata_path_new, 'T1.mgz'))
    shutil.copyfile(op.join(mridata_path, 'brain.mgz'),
                    op.join(mridata_path_new, 'brain.mgz'))
    # Copy the available mri/flash/mef*.mgz files from the dataset
    flash_path = op.join(mridata_path_new, 'flash')
    for kind in (5, 30):
        in_fname = op.join(mridata_path, 'flash', 'mef%02d.mgz' % kind)
        shutil.copyfile(in_fname, op.join(flash_path, op.basename(in_fname)))
    # Test mne flash_bem with --noconvert option
    # (since there are no DICOM Flash images in dataset)
    out_fnames = list()
    for kind in ('outer_skin', 'outer_skull', 'inner_skull'):
        out_fnames.append(op.join(subject_path_new, 'bem', kind + '.surf'))
    assert not any(op.isfile(out_fname) for out_fname in out_fnames)
    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'),
                    disable_stdout=False, disable_stderr=False):
        mne_flash_bem.run()
    # do they exist and are expected size
    for out_fname in out_fnames:
        _, tris = read_surface(out_fname)
        assert len(tris) == 5120
Example no. 11
0
def test_make_morph_maps():
    """Test reading and creating morph maps
    """
    # make a new fake subjects_dir
    tempdir = _TempDir()
    for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
        os.mkdir(op.join(tempdir, subject))
        os.mkdir(op.join(tempdir, subject, 'surf'))
        for hemi in ['lh', 'rh']:
            args = [subject, 'surf', hemi + '.sphere.reg']
            copyfile(op.join(subjects_dir, *args),
                     op.join(tempdir, *args))

    # this should trigger the creation of morph-maps dir and create the map
    mmap = read_morph_map('fsaverage_ds', 'sample_ds', tempdir)
    mmap2 = read_morph_map('fsaverage_ds', 'sample_ds', subjects_dir)
    assert_equal(len(mmap), len(mmap2))
    for m1, m2 in zip(mmap, mmap2):
        # deal with sparse matrix stuff
        diff = (m1 - m2).data
        assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)

    # This will also trigger creation, but it's trivial
    mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
    for mm in mmap:
        assert_true((mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0)
Example no. 12
0
def test_make_flash_bem():
    """Test computing bem from flash images."""
    tmp = _TempDir()
    bemdir = op.join(subjects_dir, 'sample', 'bem')
    flash_path = op.join(subjects_dir, 'sample', 'mri', 'flash')

    for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
        copy(op.join(bemdir, surf + '.surf'), tmp)
        copy(op.join(bemdir, surf + '.tri'), tmp)
    copy(op.join(bemdir, 'inner_skull_tmp.tri'), tmp)
    copy(op.join(bemdir, 'outer_skin_from_testing.surf'), tmp)

    # This function deletes the tri files at the end.
    try:
        make_flash_bem('sample', overwrite=True, subjects_dir=subjects_dir,
                       flash_path=flash_path)
        for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
            coords, faces = read_surface(op.join(bemdir, surf + '.surf'))
            surf = 'outer_skin_from_testing' if surf == 'outer_skin' else surf
            coords_c, faces_c = read_surface(op.join(tmp, surf + '.surf'))
            assert_equal(0, faces.min())
            assert_equal(coords.shape[0], faces.max() + 1)
            assert_allclose(coords, coords_c)
            assert_allclose(faces, faces_c)
    finally:
        for surf in ('inner_skull', 'outer_skull', 'outer_skin'):
            remove(op.join(bemdir, surf + '.surf'))  # delete symlinks
            copy(op.join(tmp, surf + '.tri'), bemdir)  # return deleted tri
            copy(op.join(tmp, surf + '.surf'), bemdir)  # return moved surf
        copy(op.join(tmp, 'inner_skull_tmp.tri'), bemdir)
        copy(op.join(tmp, 'outer_skin_from_testing.surf'), bemdir)
    plt.close('all')
def test_other_volume_source_spaces():
    """Test setting up other volume source spaces"""
    # these are split off because they require the MNE tools, and
    # Travis doesn't seem to like them

    # let's try the spherical one (no bem or surf supplied)
    tempdir = _TempDir()
    temp_name = op.join(tempdir, 'temp-src.fif')
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name,
                    '--mri', fname_mri])
    src = read_source_spaces(temp_name)
    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                        mri=fname_mri,
                                        subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode='approx')
    del src
    del src_new
    assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
                  pos=7.0, sphere=[1., 1.], mri=fname_mri,  # bad sphere
                  subjects_dir=subjects_dir)

    # now without MRI argument, it should give an error when we try
    # to read it
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name])
    assert_raises(ValueError, read_source_spaces, temp_name)
Example no. 14
0
def test_make_morph_maps():
    """Test reading and creating morph maps."""
    # make a new fake subjects_dir
    tempdir = _TempDir()
    for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
        os.mkdir(op.join(tempdir, subject))
        os.mkdir(op.join(tempdir, subject, 'surf'))
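        # xhemi morphing also needs the ?h.sphere.left_right registration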
        regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
        for hemi in ['lh', 'rh']:
            for reg in regs:
                args = [subject, 'surf', hemi + '.sphere.' + reg]
                copyfile(op.join(subjects_dir, *args),
                         op.join(tempdir, *args))

    for subject_from, subject_to, xhemi in (
            ('fsaverage_ds', 'sample_ds', False),
            ('fsaverage_ds', 'fsaverage_ds', True)):
        # trigger the creation of morph-maps dir and create the map
        with warnings.catch_warnings(record=True):
            mmap = read_morph_map(subject_from, subject_to, tempdir,
                                  xhemi=xhemi)
        mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
                               xhemi=xhemi)
        assert_equal(len(mmap), len(mmap2))
        for m1, m2 in zip(mmap, mmap2):
            # deal with sparse matrix stuff
            diff = (m1 - m2).data
            assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)

    # This will also trigger creation, but it's trivial
    with warnings.catch_warnings(record=True):
        mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
    for mm in mmap:
        assert_true((mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0)
Example no. 15
0
def test_tempdir():
    """Test TempDir."""
    tempdir2 = _TempDir()
    assert_true(op.isdir(tempdir2))
    x = str(tempdir2)
    del tempdir2
    assert_true(not op.isdir(x))
Example no. 16
0
def test_read_segment():
    """Test writing raw eeg files when preload is False
    """
    tempdir = _TempDir()
    raw1 = read_raw_brainvision(vhdr_path, eog=eog, preload=False)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, overwrite=True)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:, :]
    data11, times11 = raw11[:, :]
    assert_array_almost_equal(data1, data11, 8)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))

    raw2 = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
    raw2_file = op.join(tempdir, 'test2-raw.fif')
    raw2.save(raw2_file, overwrite=True)
    data2, times2 = raw2[:, :]
    assert_array_equal(data1, data2)
    assert_array_equal(times1, times2)

    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)

    # save with buffer size smaller than file
    raw3_file = op.join(tempdir, 'test3-raw.fif')
    raw3 = read_raw_brainvision(vhdr_path, eog=eog)
    raw3.save(raw3_file, buffer_size_sec=2)
    raw3 = Raw(raw3_file, preload=True)
    assert_array_equal(raw3._data, raw1._data)
Example no. 17
0
def test_read_segment():
    """Test writing raw edf files when preload is False
    """
    tempdir = _TempDir()
    raw1 = read_raw_edf(edf_path, stim_channel=139, preload=False)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:139, :]
    data11, times11 = raw11[:139, :]
    assert_allclose(data1, data11, rtol=1e-6)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))

    raw2 = read_raw_edf(edf_path, stim_channel=139, preload=True)
    raw2_file = op.join(tempdir, 'test2-raw.fif')
    raw2.save(raw2_file, overwrite=True)
    data2, times2 = raw2[:139, :]
    assert_allclose(data1, data2, rtol=1e-6)
    assert_array_equal(times1, times2)

    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)

    # test the _read_segment function by only loading some of the data
    raw1 = read_raw_edf(edf_path, preload=False)
    raw2 = read_raw_edf(edf_path, preload=True)

    # select some random range of data to compare
    data1, times1 = raw1[:, 345:417]
    data2, times2 = raw2[:, 345:417]
    assert_array_equal(data1, data2)
    assert_array_equal(times1, times2)
Example no. 18
0
def test_edf_data():
    """Test edf files"""
    _test_raw_reader(read_raw_edf, input_fname=edf_path, stim_channel=None)
    raw_py = read_raw_edf(edf_path, preload=True)
    # Test saving and loading when annotations were parsed.
    tempdir = _TempDir()
    raw_file = op.join(tempdir, 'test-raw.fif')
    raw_py.save(raw_file, overwrite=True, buffer_size_sec=1)
    Raw(raw_file, preload=True)

    edf_events = find_events(raw_py, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # onset, duration, id
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
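    # durations -> offsets in samples (at least one sample past each onset)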
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)
Example no. 19
0
def test_config():
    """Test mne-python config file support"""
    tempdir = _TempDir()
    key = '_MNE_PYTHON_CONFIG_TESTING'
    value = '123456'
    old_val = os.getenv(key, None)
    os.environ[key] = value
    assert_true(get_config(key) == value)
    del os.environ[key]
    # catch the warning about it being a non-standard config key
    assert_true(len(set_config(None, None)) > 10)  # tuple of valid keys
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        set_config(key, None, home_dir=tempdir)
    assert_true(len(w) == 1)
    assert_true(get_config(key, home_dir=tempdir) is None)
    assert_raises(KeyError, get_config, key, raise_error=True)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        set_config(key, value, home_dir=tempdir)
        assert_true(get_config(key, home_dir=tempdir) == value)
        set_config(key, None, home_dir=tempdir)
    if old_val is not None:
        os.environ[key] = old_val
    # Check if get_config with no input returns all config
    key = 'MNE_PYTHON_TESTING_KEY'
    config = {key: value}
    with warnings.catch_warnings(record=True):  # non-standard key
        warnings.simplefilter('always')
        set_config(key, value, home_dir=tempdir)
    assert_equal(get_config(home_dir=tempdir), config)
Example no. 20
0
def test_kit2fiff_gui():
    """Test Kit2Fiff GUI."""
    if os.environ.get('TRAVIS_OS_NAME') == 'linux':
        raise SkipTest("Skipping on Travis for Linux due to GUI error")
    home_dir = _TempDir()
    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    os.environ['_MNE_FAKE_HOME_DIR'] = home_dir
    try:
        with warnings.catch_warnings(record=True):  # traits warnings
            warnings.simplefilter('always')
            ui, frame = mne.gui.kit2fiff()
            assert_false(frame.model.can_save)
            assert_equal(frame.model.stim_threshold, 1.)
            frame.model.stim_threshold = 10.
            frame.model.stim_chs = 'save this!'
            # ui.dispose() should close the Traits-UI, but it opens modal
            # dialogs which interrupt the tests. This workaround triggers
            # saving of configurations without closing the window:
            frame.save_config(home_dir)

            # test setting persistence
            ui, frame = mne.gui.kit2fiff()
            assert_equal(frame.model.stim_threshold, 10.)
            assert_equal(frame.model.stim_chs, 'save this!')

            frame.model.markers.mrk1.file = mrk_pre_path
            frame.marker_panel.mrk1_obj.label = True
            frame.marker_panel.mrk1_obj.label = False
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']
        del os.environ['_MNE_FAKE_HOME_DIR']
Example no. 21
0
def test_n_components_and_max_pca_components_none(method):
    """Test n_components and max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)

    max_pca_components = None
    n_components = None
    random_state = 12345

    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')
    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with pytest.warns(None):  # convergence
        ica.fit(epochs)
    ica.save(output_fname)

    ica = read_ica(output_fname)

    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value.
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    assert ica.n_components is None
Example no. 22
0
def test_render_add_sections():
    """Test adding figures/images to section.
    """
    tempdir = _TempDir()
    import matplotlib.pyplot as plt

    report = Report(subjects_dir=subjects_dir)
    # Check add_figs_to_section functionality
    fig = plt.plot([1, 2], [1, 2])[0].figure
    report.add_figs_to_section(figs=fig,  # test non-list input
                               captions=['evoked response'], scale=1.2,
                               image_format='svg')
    assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
                  captions='H')

    # Check add_images_to_section
    img_fname = op.join(tempdir, 'testimage.png')
    fig.savefig(img_fname)
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])
    assert_raises(ValueError, report.add_images_to_section,
                  fnames=[img_fname, img_fname], captions='H')

    # Check deprecation of add_section
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.add_section(figs=fig,
                           captions=['evoked response'])
        assert_true(w[0].category == DeprecationWarning)
Example no. 23
0
def test_flash_bem():
    """Test mne flash_bem."""
    check_usage(mne_flash_bem, force_help=True)
    # Using the sample dataset
    subjects_dir = op.join(sample.data_path(download=False), 'subjects')
    # Copy necessary files to tempdir
    tempdir = _TempDir()
    mridata_path = op.join(subjects_dir, 'sample', 'mri')
    mridata_path_new = op.join(tempdir, 'sample', 'mri')
    os.makedirs(op.join(mridata_path_new, 'flash'))
    os.makedirs(op.join(tempdir, 'sample', 'bem'))
    shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
                    op.join(mridata_path_new, 'T1.mgz'))
    shutil.copyfile(op.join(mridata_path, 'brain.mgz'),
                    op.join(mridata_path_new, 'brain.mgz'))
    # Copy the available mri/flash/mef*.mgz files from the dataset
    files = glob.glob(op.join(mridata_path, 'flash', 'mef*.mgz'))
    for infile in files:
        shutil.copyfile(infile, op.join(mridata_path_new, 'flash',
                                        op.basename(infile)))
    # Test mne flash_bem with --noconvert option
    # (since there are no DICOM Flash images in dataset)
    currdir = os.getcwd()
    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'),
                    disable_stdout=False, disable_stderr=False):
        mne_flash_bem.run()
    os.chdir(currdir)
def test_read_segment():
    """Test writing raw kit files when preload is False
    """
    tempdir = _TempDir()
    raw1 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                        preload=False)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, buffer_size_sec=.1, overwrite=True)
    raw2 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                        preload=True)
    raw2_file = op.join(tempdir, 'test2-raw.fif')
    raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
    data1, times1 = raw1[0, 0:1]

    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)
    data2, times2 = raw2[0, 0:1]
    assert_array_almost_equal(data1, data2)
    assert_array_almost_equal(times1, times2)
    raw3 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                        preload=True)
    assert_array_almost_equal(raw1._data, raw3._data)
    raw4 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                        preload=False)
    raw4.load_data()
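    # preload may also be a filename, memory-mapping the data to disk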
    buffer_fname = op.join(tempdir, 'buffer')
    assert_array_almost_equal(raw1._data, raw4._data)
    raw5 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                        preload=buffer_fname)
    assert_array_almost_equal(raw1._data, raw5._data)
Example no. 25
0
def test_read_write_info():
    """Test IO of info
    """
    tempdir = _TempDir()
    info = read_info(raw_fname)
    temp_file = op.join(tempdir, 'info.fif')
    # check for bug `#1198`
    info['dev_head_t']['trans'] = np.eye(4)
    t1 = info['dev_head_t']['trans']
    write_info(temp_file, info)
    info2 = read_info(temp_file)
    t2 = info2['dev_head_t']['trans']
    assert_true(len(info['chs']) == len(info2['chs']))
    assert_array_equal(t1, t2)
    # proc_history (e.g., GH#1875)
    creator = u'é'
    info = read_info(chpi_fname)
    info['proc_history'][0]['creator'] = creator
    info['hpi_meas'][0]['creator'] = creator
    info['subject_info']['his_id'] = creator
    write_info(temp_file, info)
    info = read_info(temp_file)
    assert_equal(info['proc_history'][0]['creator'], creator)
    assert_equal(info['hpi_meas'][0]['creator'], creator)
    assert_equal(info['subject_info']['his_id'], creator)
Example no. 26
0
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    tempdir = _TempDir()
    cov = read_cov(cov_fname)
    cov.save(op.join(tempdir, 'test-cov.fif'))
    cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_array_almost_equal(cov.data, cov2.data)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
Example no. 27
0
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
                / linalg.norm(cov.data, ord='fro') < 1e-4)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
Example no. 28
0
def test_make_forward_solution_sphere():
    """Test making a forward solution with a sphere model."""
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    out_name = op.join(temp_dir, 'tmp-fwd.fif')
    run_subprocess(['mne_forward_solution', '--meg', '--eeg',
                    '--meas', fname_raw, '--src', fname_src_small,
                    '--mri', fname_trans, '--fwd', out_name])
    fwd = read_forward_solution(out_name)
    sphere = make_sphere_model(verbose=True)
    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                   meg=True, eeg=True, verbose=True)
    _compare_forwards(fwd, fwd_py, 366, 108,
                      meg_rtol=5e-1, meg_atol=1e-6,
                      eeg_rtol=5e-1, eeg_atol=5e-1)
    # Since the above is pretty lax, let's check a different way
    for meg, eeg in zip([True, False], [False, True]):
        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
        assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
                                    fwd_py_['sol']['data'].ravel())[0, 1],
                        1.0, rtol=1e-3)
Example no. 29
0
def test_make_scalp_surfaces():
    """Test mne make_scalp_surfaces"""
    check_usage(mne_make_scalp_surfaces)
    # Copy necessary files to avoid FreeSurfer call
    tempdir = _TempDir()
    surf_path = op.join(subjects_dir, 'sample', 'surf')
    surf_path_new = op.join(tempdir, 'sample', 'surf')
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(surf_path_new)
    os.mkdir(op.join(tempdir, 'sample', 'bem'))
    shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new)

    orig_fs = os.getenv('FREESURFER_HOME', None)
    orig_mne = os.getenv('MNE_ROOT')
    if orig_fs is not None:
        del os.environ['FREESURFER_HOME']
    cmd = ('-s', 'sample', '--subjects-dir', tempdir)
    os.environ['_MNE_TESTING_SCALP'] = 'true'
    try:
        with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False):
            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
            os.environ['FREESURFER_HOME'] = tempdir  # don't need it
            del os.environ['MNE_ROOT']
            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
            os.environ['MNE_ROOT'] = orig_mne
            mne_make_scalp_surfaces.run()
            assert_raises(IOError, mne_make_scalp_surfaces.run)  # no overwrite
    finally:
        if orig_fs is not None:
            os.environ['FREESURFER_HOME'] = orig_fs
        os.environ['MNE_ROOT'] = orig_mne
        del os.environ['_MNE_TESTING_SCALP']
Example no. 30
0
def test_mne_c_design():
    """Test MNE-C filter design."""
    tempdir = _TempDir()
    temp_fname = op.join(tempdir, 'test_raw.fif')
    out_fname = op.join(tempdir, 'test_c_raw.fif')
    x = np.zeros((1, 10001))
    x[0, 5000] = 1.
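    # the filtered output around the impulse is the filter's impulse response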
    time_sl = slice(5000 - 4096, 5000 + 4097)
    sfreq = 1000.
    RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)

    tols = dict(rtol=1e-4, atol=1e-4)
    cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
           '--save', out_fname)
    run_subprocess(cmd)
    h = design_mne_c_filter(sfreq, None, 40)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
    h = design_mne_c_filter(sfreq, 5, 40, 2.5)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)

    run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
    h = design_mne_c_filter(sfreq, 10, None, verbose=True)
    h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
    assert_allclose(h, h_c, **tols)
Example no. 31
0
def test_bads_reading():
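    """Test reading bad channels from FIF and channels.tsv."""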
    bids_root = _TempDir()
    bids_path = _bids_path.copy().update(root=bids_root)
    data_path = BIDSPath(subject=subject_id,
                         session=session_id,
                         datatype='meg',
                         root=bids_root).mkdir().directory
    ch_path = (bids_path.copy().update(suffix='channels', extension='.tsv'))
    channels_fname = op.join(data_path, ch_path.basename)

    raw_bids_fname = (bids_path.copy().update(root=bids_root,
                                              datatype='meg',
                                              suffix='meg',
                                              extension='.fif'))
    raw = _read_raw_fif(raw_fname, verbose=False)

    ###########################################################################
    # bads in FIF only, no `status` column in channels.tsv
    bads = ['EEG 053', 'MEG 2443']
    raw.info['bads'] = bads
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)

    # Delete `status` column
    tsv_data = _from_tsv(channels_fname)
    del tsv_data['status'], tsv_data['status_description']
    _to_tsv(tsv_data, fname=channels_fname)

    raw = read_raw_bids(bids_path=bids_path, verbose=False)
    assert raw.info['bads'] == bads

    ###########################################################################
    # bads in `status` column in channels.tsv, no bads in raw.info['bads']
    bads = ['EEG 053', 'MEG 2443']
    raw.info['bads'] = bads
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)

    # Remove info['bads'] from the raw file.
    raw = _read_raw_fif(raw_bids_fname, preload=True, verbose=False)
    raw.info['bads'] = []
    raw.save(raw_bids_fname, overwrite=True, verbose=False)

    raw = read_raw_bids(bids_path=bids_path, verbose=False)
    assert type(raw.info['bads']) is list
    assert set(raw.info['bads']) == set(bads)

    ###########################################################################
    # Different bads in `status` column and raw.info['bads']
    bads_bids = ['EEG 053', 'MEG 2443']
    bads_raw = ['MEG 0112', 'MEG 0131']

    raw.info['bads'] = bads_bids
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)

    # Replace info['bads'] in the raw file.
    raw = _read_raw_fif(raw_bids_fname, preload=True, verbose=False)
    raw.info['bads'] = bads_raw
    raw.save(raw_bids_fname, overwrite=True, verbose=False)

    with pytest.warns(RuntimeWarning, match='conflicting information'):
        raw = read_raw_bids(bids_path=bids_path, verbose=False)
    assert type(raw.info['bads']) is list
    assert set(raw.info['bads']) == set(bads_bids)
Example no. 32
0
def test_simulate_raw_sphere():
    """Test simulation of raw data with sphere model"""
    seed = 42
    raw, src, stc, trans, sphere = _get_data()
    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)

    # head pos
    head_pos_sim = dict()
    # these will be at 1., 2., ... sec
    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]

    for time_key, shift in enumerate(shifts):
        # Create 4x4 matrix transform and normalize
        temp_trans = deepcopy(raw.info['dev_head_t'])
        temp_trans['trans'][:3, 3] += shift
        head_pos_sim[time_key + 1.] = temp_trans['trans']

    #
    # Test raw simulation with basic parameters
    #
    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
                           head_pos=head_pos_sim,
                           blink=True, ecg=True, random_state=seed)
    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
                             cov_fname, head_pos=head_pos_sim,
                             blink=True, ecg=True, random_state=seed)
    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
    # Test IO on processed data
    tempdir = _TempDir()
    test_outname = op.join(tempdir, 'sim_test_raw.fif')
    raw_sim.save(test_outname)

    raw_sim_loaded = Raw(test_outname, preload=True, proj=False)
    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
    del raw_sim, raw_sim_2
    # with no cov (no noise) but with artifacts, most time periods should match
    # but the EOG/ECG channels should not
    for ecg, eog in ((True, False), (False, True), (True, True)):
        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=eog, ecg=ecg, random_state=seed)
        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
                                 cov=None, head_pos=head_pos_sim,
                                 blink=False, ecg=False, random_state=seed)
        picks = np.arange(len(raw.ch_names))
        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
        these_picks = np.setdiff1d(picks, diff_picks)
        close = isclose(raw_sim_3[these_picks][0],
                        raw_sim_4[these_picks][0], atol=1e-20)
        assert_true(np.mean(close) > 0.7)
        far = ~isclose(raw_sim_3[diff_picks][0],
                       raw_sim_4[diff_picks][0], atol=1e-20)
        assert_true(np.mean(far) > 0.99)
    del raw_sim_3, raw_sim_4

    # make sure it works with EEG-only and MEG-only
    raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True),
                               stc, trans, src, sphere, cov=None,
                               ecg=True, blink=True, random_state=seed)
    raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True),
                                stc, trans, src, sphere, cov=None,
                                ecg=True, blink=True, random_state=seed)
    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg

    # check that different interpolations are similar given small movements
    raw_sim_cos = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim,
                               random_state=seed)
    raw_sim_lin = simulate_raw(raw, stc, trans, src, sphere,
                               head_pos=head_pos_sim, interp='linear',
                               random_state=seed)
    assert_allclose(raw_sim_cos[:][0], raw_sim_lin[:][0],
                    rtol=1e-5, atol=1e-20)
    del raw_sim_cos, raw_sim_lin

    # Make impossible transform (translate up into helmet) and ensure failure
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  ecg=False, blink=False, head_pos=head_pos_sim_err)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
                  bem_fname, ecg=False, blink=False,
                  head_pos=head_pos_sim_err)
    # other degenerate conditions
    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
                  trans, src, sphere)
    stc_bad = stc.copy()
    stc_bad.tstep += 0.1
    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  chpi=True)  # no cHPI info
    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
                  interp='foo')
    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=1.)
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=pos_fname)  # ends up with t>t_end
    head_pos_sim_err = deepcopy(head_pos_sim)
    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
                  head_pos=head_pos_sim_err)
    raw_bad = raw.copy()
    raw_bad.info['dig'] = None
    assert_raises(RuntimeError, simulate_raw, raw_bad, stc, trans, src, sphere,
                  blink=True)
Example no. 33
0
def test_compute_proj_raw():
    """Test SSP computation on raw"""
    tempdir = _TempDir()
    # Test that the raw projectors work
    raw_time = 2.5  # Do shorter amount for speed
    raw = read_raw_fif(raw_fname).crop(0, raw_time)
    raw.load_data()
    for ii in (0.25, 0.5, 1, 2):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs = compute_proj_raw(raw,
                                     duration=ii - 0.1,
                                     stop=raw_time,
                                     n_grad=1,
                                     n_mag=1,
                                     n_eeg=0)
            assert_true(len(w) == 1)

        # test that you can compute the projection matrix
        projs = activate_proj(projs)
        proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])

        assert_true(nproj == 2)
        assert_true(U.shape[1] == 2)

        # test that you can save them
        raw.info['projs'] += projs
        raw.save(op.join(tempdir, 'foo_%d_raw.fif' % ii), overwrite=True)

    # Test that purely continuous (no duration) raw projection works
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw,
                                 duration=None,
                                 stop=raw_time,
                                 n_grad=1,
                                 n_mag=1,
                                 n_eeg=0)
        assert_equal(len(w), 1)

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    raw.info['projs'] += projs
    raw.save(op.join(tempdir, 'foo_rawproj_continuous_raw.fif'))

    # test resampled-data projector, upsampling instead of downsampling
    # here to save an extra filtering (raw would have to be LP'ed to be equiv)
    raw_resamp = cp.deepcopy(raw)
    raw_resamp.resample(raw.info['sfreq'] * 2, n_jobs=2, npad='auto')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw_resamp,
                                 duration=None,
                                 stop=raw_time,
                                 n_grad=1,
                                 n_mag=1,
                                 n_eeg=0)
    projs = activate_proj(projs)
    proj_new, _, _ = make_projector(projs, raw.ch_names, bads=[])
    assert_array_almost_equal(proj_new, proj, 4)

    # test with bads
    raw.load_bad_channels(bads_fname)  # adds 2 bad mag channels
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = compute_proj_raw(raw, n_grad=0, n_mag=0, n_eeg=1)

    # test that bad channels can be excluded
    proj, nproj, U = make_projector(projs, raw.ch_names, bads=raw.ch_names)
    assert_array_almost_equal(proj, np.eye(len(raw.ch_names)))
Example no. 34
0
def test_compute_proj_epochs():
    """Test SSP computation on epochs."""
    tempdir = _TempDir()
    event_id, tmin, tmax = 1, -0.2, 0.3

    raw = read_raw_fif(raw_fname, preload=True)
    events = read_events(event_fname)
    bad_ch = 'MEG 2443'
    picks = pick_types(raw.info,
                       meg=True,
                       eeg=False,
                       stim=False,
                       eog=False,
                       exclude=[])
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=None,
                    proj=False)

    evoked = epochs.average()
    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
    for p_fname in [
            proj_fname, proj_gz_fname,
            op.join(tempdir, 'test-proj.fif.gz')
    ]:
        projs2 = read_proj(p_fname)

        assert_true(len(projs) == len(projs2))

        for p1, p2 in zip(projs, projs2):
            assert_true(p1['desc'] == p2['desc'])
            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
            assert_true(p1['active'] == p2['active'])
            # compare with sign invariance
            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
            if bad_ch in p1['data']['col_names']:
                bad = p1['data']['col_names'].index('MEG 2443')
                mask = np.ones(p1_data.size, dtype=np.bool)
                mask[bad] = False
                p1_data = p1_data[:, mask]
                p2_data = p2_data[:, mask]
            corr = np.corrcoef(p1_data, p2_data)[0, 1]
            assert_array_almost_equal(corr, 1.0, 5)
            if p2['explained_var']:
                assert_array_almost_equal(p1['explained_var'],
                                          p2['explained_var'])

    # test that you can compute the projection matrix
    projs = activate_proj(projs)
    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])

    assert_true(nproj == 2)
    assert_true(U.shape[1] == 2)

    # test that you can save them
    epochs.info['projs'] += projs
    evoked = epochs.average()
    evoked.save(op.join(tempdir, 'foo-ave.fif'))

    projs = read_proj(proj_fname)

    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
    assert_true(len(projs_evoked) == 2)
    # XXX : test something

    # test parallelization
    projs = compute_proj_epochs(epochs,
                                n_grad=1,
                                n_mag=1,
                                n_eeg=0,
                                n_jobs=2,
                                desc_prefix='foobar')
    assert_true(all('foobar' in x['desc'] for x in projs))
    projs = activate_proj(projs)
    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_proj(proj_badname, projs)
        read_proj(proj_badname)
    assert_naming(w, 'test_proj.py', 2)
Example no. 35
0
def _test_raw_reader(reader, test_preloading=True, **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, preloaded,
        non-preloaded, and memory-mapped reads are all tested.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : Instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec * raw.info['sfreq'])),
                  raw.n_times)
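        # bnd = samples per buffer; slices test within- and cross-buffer reads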
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = read_raw_fif(out_fname)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert_equal(concat_raw.n_times, 2 * raw.n_times)
    assert_equal(concat_raw.first_samp, first_samp)
    assert_equal(concat_raw.last_samp - last_samp + first_samp, last_samp + 1)
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    if concat_raw.info['meas_date'] is None:
        expected_bad_boundary_onset = ((last_samp - first_samp) /
                                       raw.info['sfreq'])
    else:
        expected_bad_boundary_onset = last_samp / raw.info['sfreq']

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=2)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert_equal(raw.info['meas_id'][key], raw3.info['meas_id'][key])
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])
    return raw
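
As a quick illustration (a hedged sketch, not part of the original suite), a concrete reader test might call this helper as follows; the FIF path is a placeholder:

from mne.io import read_raw_fif

# Keyword arguments are forwarded to the reader; 'sample_audvis_raw.fif' is a
# hypothetical path to a raw FIF recording available on disk.
raw = _test_raw_reader(read_raw_fif, test_preloading=True,
                       fname='sample_audvis_raw.fif')
print(raw.n_times, raw.info['sfreq'])
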
Example no. 36
import os.path as op

from nose.tools import assert_true
from numpy.testing import assert_array_equal, assert_equal, assert_allclose

from mne.datasets import sample
from mne import read_trans, write_trans
from mne.utils import _TempDir
from mne.transforms import _get_mri_head_t_from_trans_file, invert_transform

data_path = sample.data_path(download=False)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
fname_trans = op.join(
    op.split(__file__)[0], '..', 'fiff', 'tests', 'data',
    'sample-audvis-raw-trans.txt')

tempdir = _TempDir()


@sample.requires_sample_data
def test_get_mri_head_t():
    """Test converting '-trans.txt' to '-trans.fif'"""
    trans = read_trans(fname)
    trans = invert_transform(trans)  # starts out as head->MRI, so invert
    trans_2 = _get_mri_head_t_from_trans_file(fname_trans)
    assert_equal(trans['from'], trans_2['from'])
    assert_equal(trans['to'], trans_2['to'])
    assert_allclose(trans['trans'], trans_2['trans'], rtol=1e-5, atol=1e-5)
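
A hedged round-trip sketch for the transform I/O that the truncated test_io_trans below exercises; the input path is an assumption:

import os.path as op

from numpy.testing import assert_allclose
from mne import read_trans, write_trans
from mne.utils import _TempDir

trans = read_trans('sample_audvis_raw-trans.fif')  # assumed to exist on disk
out_fname = op.join(_TempDir(), 'roundtrip-trans.fif')
write_trans(out_fname, trans)
assert_allclose(trans['trans'], read_trans(out_fname)['trans'])
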


@sample.requires_sample_data
def test_io_trans():
Example no. 37
def test_io_events():
    """Test IO for events
    """
    tempdir = _TempDir()
    # Test binary fif IO
    events = read_events(fname)  # Use as the gold standard
    write_events(op.join(tempdir, 'events-eve.fif'), events)
    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
    assert_array_almost_equal(events, events2)

    # Test binary fif.gz IO
    events2 = read_events(fname_gz)  # Use as the gold standard
    assert_array_almost_equal(events, events2)
    write_events(op.join(tempdir, 'events-eve.fif.gz'), events2)
    events2 = read_events(op.join(tempdir, 'events-eve.fif.gz'))
    assert_array_almost_equal(events, events2)

    # Test new format text file IO
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)
    events2 = read_events(fname_txt_mpr)
    assert_array_almost_equal(events, events2)

    # Test old format text file IO
    events2 = read_events(fname_old_txt)
    assert_array_almost_equal(events, events2)
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)

    # Test event selection
    a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
    b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
    c = read_events(op.join(tempdir, 'events-eve.fif'),
                    exclude=[2, 3, 4, 5, 32])
    d = read_events(op.join(tempdir, 'events-eve.fif'),
                    include=1,
                    exclude=[2, 3])
    assert_array_equal(a, b)
    assert_array_equal(a, c)
    assert_array_equal(a, d)

    # Test binary file IO for 1 event
    events = read_events(fname_1)  # Use as the new gold standard
    write_events(op.join(tempdir, 'events-eve.fif'), events)
    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
    assert_array_almost_equal(events, events2)

    # Test text file IO for 1 event
    write_events(op.join(tempdir, 'events.eve'), events)
    events2 = read_events(op.join(tempdir, 'events.eve'))
    assert_array_almost_equal(events, events2)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        write_events(fname2, events)
        read_events(fname2)
    assert_true(len(w) == 2)
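
A minimal, hedged round-trip sketch of the events I/O exercised above; the input filename is an assumption and must point to an existing MNE events file:

import os.path as op

from numpy.testing import assert_array_almost_equal
from mne import read_events, write_events
from mne.utils import _TempDir

events = read_events('sample_audvis_raw-eve.fif')  # assumed to exist on disk
out_fname = op.join(_TempDir(), 'roundtrip-eve.fif')
write_events(out_fname, events)  # output format follows the file suffix
assert_array_almost_equal(events, read_events(out_fname))
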
Example no. 38
def test_plot_alignment():
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = _TempDir()
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{
        'coord_frame': 5,
        'ident': 1,
        'kind': 1,
        'r': [-0.08061612, -0.02908875, -0.04131077]
    }, {
        'coord_frame': 5,
        'ident': 2,
        'kind': 1,
        'r': [0.00146763, 0.08506715, -0.03483611]
    }, {
        'coord_frame': 5,
        'ident': 3,
        'kind': 1,
        'r': [0.08436285, -0.02850276, -0.04127743]
    }]
    write_dig(fiducials_path, fid, 5)

    mlab = _import_mlab()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    with warnings.catch_warnings(record=True):  # 4D weight tables
        bti = read_raw_bti(pdf_fname,
                           config_fname,
                           hs_fname,
                           convert=True,
                           preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        plot_alignment(info,
                       trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       meg=meg)
        mlab.close(all=True)
    # KIT ref sensor coil def is defined
    mlab.close(all=True)
    info = infos['Neuromag']
    assert_raises(TypeError,
                  plot_alignment,
                  'foo',
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir)
    assert_raises(TypeError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  src='foo')
    assert_raises(ValueError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='fsaverage',
                  subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir,
                    head=True,
                    skull=True,
                    brain='white')
    mlab.close(all=True)
    # no-head version
    mlab.close(all=True)
    # all coord frames
    assert_raises(ValueError, plot_alignment, info)
    plot_alignment(info, surfaces=[])
    for coord_frame in ('meg', 'head', 'mri'):
        plot_alignment(info,
                       meg=['helmet', 'sensors'],
                       dig=True,
                       coord_frame=coord_frame,
                       trans=trans_fname,
                       subject='sample',
                       mri_fiducials=fiducials_path,
                       subjects_dir=subjects_dir,
                       src=sample_src)
        mlab.close(all=True)
    # EEG only with strange options
    evoked_eeg_ecog = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog.set_channel_types({'EEG 001': 'ecog'})
    with warnings.catch_warnings(record=True) as w:
        plot_alignment(evoked_eeg_ecog.info,
                       subject='sample',
                       trans=trans_fname,
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'],
                       ecog=True)
    mlab.close(all=True)
    assert_true(any('Cannot plot MEG' in str(ww.message) for ww in w))

    sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info,
        subject='sample',
        eeg='projected',
        meg='helmet',
        bem=sphere,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg='helmet',
                   subjects_dir=subjects_dir,
                   eeg='projected',
                   bem=sphere,
                   surfaces=['head', 'brain'],
                   src=sample_src)
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=[],
                   subjects_dir=subjects_dir,
                   bem=bem_sol,
                   eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'],
                   bem=bem_surfs)
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info,
        eeg='projected',
        meg='helmet',
        bem=sphere,
        src=src,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=False,
                   coord_frame='mri',
                   subjects_dir=subjects_dir,
                   surfaces=['brain'],
                   bem=sphere,
                   show_axes=True)

    # one layer bem with skull surfaces:
    assert_raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['brain', 'head', 'inner_skull'],
                  bem=sphere)
    # wrong eeg value:
    assert_raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  eeg='foo')
    # wrong meg value:
    assert_raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  meg='bar')
    # multiple brain surfaces:
    assert_raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['white', 'pial'])
    assert_raises(TypeError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=[1])
    assert_raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['foo'])
    mlab.close(all=True)
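
For orientation, a hedged sketch of the most common plot_alignment call pattern; it assumes the MNE sample dataset has already been downloaded, so every path below is an assumption:

import os.path as op

import mne

data_path = mne.datasets.sample.data_path()  # requires the sample dataset
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_raw-trans.fif')
info = mne.io.read_raw_fif(raw_fname).info
mne.viz.plot_alignment(info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, dig=True,
                       meg=['helmet', 'sensors'], surfaces=['head'])
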
Example no. 39
def test_events():
    """Test reading and modifying events"""
    tempdir = _TempDir()

    # check that events are read and stim channel is synthesized correctly
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
        events = raw._get_brainvision_events()
        assert_array_equal(
            events,
            [[487, 1, 253], [497, 1, 255], [1770, 1, 254], [1780, 1, 255],
             [3253, 1, 254], [3263, 1, 255], [4936, 1, 253], [4946, 1, 255],
             [6000, 1, 255], [6620, 1, 254], [6630, 1, 255]])
        assert_equal(len(w), 1)  # for dropping Sync & R255 events

    # check that events are read and stim channel is synthesized correctly and
    # response triggers are shifted like they're supposed to be.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_brainvision(vhdr_path,
                                   eog=eog,
                                   preload=True,
                                   response_trig_shift=1000)
        events = raw._get_brainvision_events()
        assert_array_equal(
            events,
            [[487, 1, 253], [497, 1, 255], [1770, 1, 254], [1780, 1, 255],
             [3253, 1, 254], [3263, 1, 255], [4936, 1, 253], [4946, 1, 255],
             [6000, 1, 1255], [6620, 1, 254], [6630, 1, 255]])
        assert_equal(len(w), 1)  # for dropping Sync & R255 events

    # check that events are read and stim channel is synthesized correctly and
    # response triggers are ignored.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_brainvision(vhdr_path,
                                   eog=eog,
                                   preload=True,
                                   response_trig_shift=None)
        events = raw._get_brainvision_events()
        assert_array_equal(
            events,
            [[487, 1, 253], [497, 1, 255], [1770, 1, 254], [1780, 1, 255],
             [3253, 1, 254], [3263, 1, 255], [4936, 1, 253], [4946, 1, 255],
             [6620, 1, 254], [6630, 1, 255]])
        assert_equal(len(w), 1)  # for dropping Sync & R255 events

    # check that events are read properly when event_id is specified for
    # auxiliary events
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_brainvision(vhdr_path,
                                   eog=eog,
                                   preload=True,
                                   response_trig_shift=None,
                                   event_id={'Sync On': 5})
        events = raw._get_brainvision_events()
        assert_array_equal(
            events,
            [[487, 1, 253], [497, 1, 255], [1770, 1, 254], [1780, 1, 255],
             [3253, 1, 254], [3263, 1, 255], [4936, 1, 253], [4946, 1, 255],
             [6620, 1, 254], [6630, 1, 255], [7630, 1, 5]])
        assert_equal(len(w), 1)  # parsing Sync event, missing R255

    assert_raises(TypeError,
                  read_raw_brainvision,
                  vhdr_path,
                  eog=eog,
                  preload=True,
                  response_trig_shift=0.1)
    assert_raises(TypeError,
                  read_raw_brainvision,
                  vhdr_path,
                  eog=eog,
                  preload=True,
                  response_trig_shift=np.nan)

    # Test that both response_trig_shift and event_id can be set
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        read_raw_brainvision(vhdr_path,
                             eog=eog,
                             preload=False,
                             response_trig_shift=100,
                             event_id={'Sync On': 5})

        mne_events = find_events(raw, stim_channel='STI 014')
        assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
        assert_equal(len(w), 0)  # parsing the Sync event

    # modify events and check that stim channel is updated
    index = events[:, 2] == 255
    events = events[index]
    raw._set_brainvision_events(events)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # remove events
    nchan = raw.info['nchan']
    ch_name = raw.info['chs'][-2]['ch_name']
    events = np.empty((0, 3))
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
    assert_equal(len(find_events(raw, 'STI 014')), 0)
    assert_allclose(raw[-1][0], 0.)
    fname = op.join(tempdir, 'evt_raw.fif')
    raw.save(fname)

    # add events back in
    events = [[10, 1, 2]]
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
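
A hedged sketch of the reader call this test exercises; 'recording.vhdr' is a placeholder for a real BrainVision header file, and the synthesized 'STI 014' stim channel reflects the older MNE API used above:

import mne

raw = mne.io.read_raw_brainvision('recording.vhdr', preload=True)
events = mne.find_events(raw, stim_channel='STI 014')  # older API: synthesized channel
print(events[:5])
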
Example no. 40
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers."""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    _assert_cov(cov, read_cov(cov_km_fname))

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [
        Epochs(raw,
               events,
               ev_id,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True,
               reject=reject) for ev_id in event_ids
    ]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method_params=method_params)
    pytest.raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method='factor_analysis')

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True),
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=False)
    ]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    assert_equal(len(w), 2)

    # test new dict support
    epochs = Epochs(raw,
                    events,
                    dict(a=1, b=2, c=3, d=4),
                    tmin=-0.01,
                    tmax=0,
                    proj=True,
                    reject=reject,
                    preload=True)
    with warnings.catch_warnings(record=True):  # samples
        compute_covariance(epochs)

        # projs checking
        compute_covariance(epochs, projs=[])
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
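
A condensed, hedged sketch of the covariance workflow stressed above; the FIF path is a placeholder for a recording with a 'STI 014' trigger channel:

import mne

raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)  # placeholder
events = mne.find_events(raw, stim_channel='STI 014')
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.0,
                    baseline=(None, 0), preload=True)
noise_cov = mne.compute_covariance(epochs, tmax=0.0)
noise_cov.save('noise-cov.fif')
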
Example no. 41
def _test_raw_reader(reader, test_preloading=True, **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, the
        preloaded, non-preloaded, and memory-mapped-to-file cases are all
        tested.
    **kwargs :
        Arguments for the reader. Note: do not pass ``preload`` as a kwarg;
        use ``test_preloading`` instead.

    Returns
    -------
    raw : Instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names)))[:10]
        bnd = min(int(round(raw.info['buffer_size_sec'] * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            for other_raw in other_raws:
                data1, times1 = raw[picks, sl_time]
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)

    full_data = raw._data
    assert_true(raw.__class__.__name__ in repr(raw))  # to test repr
    assert_true(raw.info.__class__.__name__ in repr(raw.info))

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = Raw(out_fname)
    assert_equal(set(raw.info.keys()), set(raw3.info.keys()))
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert_true(not math.isnan(raw3.info['highpass']))
    assert_true(not math.isnan(raw3.info['lowpass']))
    assert_true(not math.isnan(raw.info['highpass']))
    assert_true(not math.isnan(raw.info['lowpass']))

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert_equal(concat_raw.n_times, 2 * raw.n_times)
    assert_equal(concat_raw.first_samp, first_samp)
    assert_equal(concat_raw.last_samp - last_samp + first_samp, last_samp + 1)
    return raw
Example no. 42
def test_ica_additional():
    """Test additional ICA functionality"""
    tempdir = _TempDir()
    stop2 = 500
    raw = Raw(raw_fname).crop(1.5, stop, copy=False)
    raw.load_data()
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None,
                  random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info,
                        meg=True,
                        stim=False,
                        ecg=False,
                        eog=True,
                        exclude='bads')
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(
                ica.exclude ==
                [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.filter(4, 20)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        with warnings.catch_warnings(record=True):  # dB warning
            ica_raw.notch_filter([10])
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', '_pre_whitener'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      epochs.average(),
                      method='ctps')
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = Raw(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)
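
Stripped of the test machinery, the core ICA cycle exercised above looks roughly like this (a hedged sketch; the FIF path is a placeholder, and picks='meg' assumes a reasonably recent MNE version):

import mne
from mne.preprocessing import ICA

raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)  # placeholder
ica = ICA(n_components=15, random_state=0)  # component count is arbitrary here
ica.fit(raw, picks='meg', decim=3)
eog_inds, eog_scores = ica.find_bads_eog(raw)  # needs EOG channels present
ica.exclude = eog_inds
raw_clean = ica.apply(raw.copy())
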
Example no. 43
def test_render_report():
    """Test rendering -*.fif files for mne report.
    """
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = Raw(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    if sys.version.startswith('3.5'):  # XXX Some strange MPL/3.5 error...
        raise SkipTest('Python 3.5 and mpl have unresolved issues')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'),
                open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
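
A hedged sketch of the basic Report workflow this test drives end to end; data_dir is a placeholder for a folder containing FIF files named with the usual MNE suffixes:

import mne

data_dir = '/path/to/fif/files'  # placeholder directory
report = mne.Report()
report.parse_folder(data_path=data_dir, pattern=['*raw.fif', '*-eve.fif'],
                    on_error='warn')
report.save('report.html', open_browser=False, overwrite=True)
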
Example no. 44
def test_events():
    """Test reading and modifying events."""
    tempdir = _TempDir()
    # Note: BrainVision event offsets are 1-based, mne offsets are 0-based.
    # So in all tests below, the "onset" is 1 less than what's in the file

    # check that events are read and stim channel is synthesized correctly
    raw = read_raw_brainvision(vhdr_path, eog=eog, event_id=event_id)
    events = raw._get_brainvision_events()
    events = events[events[:, 2] != event_id['Sync On']]
    assert_array_equal(
        events,
        [[486, 0, 253], [496, 1, 255], [1769, 1, 254], [1779, 1, 255],
         [3252, 1, 254], [3262, 1, 255], [4935, 1, 253], [4945, 1, 255],
         [5999, 1, 255], [6619, 1, 254], [6629, 1, 255]])

    # check that events are read and stim channel is synthesized correctly and
    # response triggers are shifted like they're supposed to be.
    raw = read_raw_brainvision(vhdr_path,
                               eog=eog,
                               response_trig_shift=1000,
                               event_id=event_id)
    events = raw._get_brainvision_events()
    events = events[events[:, 2] != event_id['Sync On']]
    assert_array_equal(
        events,
        [[486, 0, 253], [496, 1, 255], [1769, 1, 254], [1779, 1, 255],
         [3252, 1, 254], [3262, 1, 255], [4935, 1, 253], [4945, 1, 255],
         [5999, 1, 1255], [6619, 1, 254], [6629, 1, 255]])

    # check that events are read and stim channel is synthesized correctly and
    # response triggers are ignored.
    with warnings.catch_warnings(record=True):  # ignored events
        raw = read_raw_brainvision(vhdr_path,
                                   eog=eog,
                                   response_trig_shift=None)
    events = raw._get_brainvision_events()
    events = events[events[:, 2] != event_id['Sync On']]
    assert_array_equal(
        events, [[486, 0, 253], [496, 1, 255], [1769, 1, 254], [1779, 1, 255],
                 [3252, 1, 254], [3262, 1, 255], [4935, 1, 253],
                 [4945, 1, 255], [6619, 1, 254], [6629, 1, 255]])

    # check that events are read properly when event_id is specified for
    # auxiliary events
    with warnings.catch_warnings(record=True):  # dropped events
        raw = read_raw_brainvision(vhdr_path,
                                   eog=eog,
                                   preload=True,
                                   response_trig_shift=None,
                                   event_id=event_id)
    events = raw._get_brainvision_events()
    assert_array_equal(
        events, [[486, 0, 253], [496, 1, 255], [1769, 1, 254], [1779, 1, 255],
                 [3252, 1, 254], [3262, 1, 255], [4935, 1, 253],
                 [4945, 1, 255], [6619, 1, 254], [6629, 1, 255], [7629, 1, 5]])

    pytest.raises(TypeError,
                  read_raw_brainvision,
                  vhdr_path,
                  eog=eog,
                  preload=True,
                  response_trig_shift=0.1)
    pytest.raises(TypeError,
                  read_raw_brainvision,
                  vhdr_path,
                  eog=eog,
                  preload=True,
                  response_trig_shift=np.nan)

    # to handle the min duration = 1 of stim trig (re)construction ...
    events = np.array([[486, 1, 253], [496, 1, 255], [1769, 1, 254],
                       [1779, 1, 255], [3252, 1, 254], [3262, 1, 255],
                       [4935, 1, 253], [4945, 1, 255], [6619, 1, 254],
                       [6629, 1, 255], [7629, 1, 5]])

    # Test that both response_trig_shift and event_id can be set
    read_raw_brainvision(vhdr_path,
                         eog=eog,
                         preload=False,
                         response_trig_shift=100,
                         event_id=event_id)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # modify events and check that stim channel is updated
    index = events[:, 2] == 255
    events = events[index]
    raw._set_brainvision_events(events)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # remove events
    nchan = raw.info['nchan']
    ch_name = raw.info['chs'][-2]['ch_name']
    events = np.empty((0, 3))
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
    assert_equal(len(find_events(raw, 'STI 014')), 0)
    assert_allclose(raw[-1][0], 0.)
    fname = op.join(tempdir, 'evt_raw.fif')
    raw.save(fname)

    # add events back in
    events = [[10, 1, 2]]
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
Example no. 45
def _test_raw_reader(reader,
                     test_preloading=True,
                     test_kwargs=True,
                     boundary_decimal=2,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, the
        preloaded, non-preloaded, and memory-mapped-to-file cases are all
        tested.
    test_kwargs : bool
        Whether to test reconstructing the reader from ``raw._init_kwargs``.
    boundary_decimal : int
        Number of decimals up to which the 'BAD boundary' annotation onset
        should match.
    **kwargs :
        Arguments for the reader. Note: do not pass ``preload`` as a kwarg;
        use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    assert _handle_meas_date(raw.info['meas_date']) >= 0

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = read_raw_fif(out_fname)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    if concat_raw.info['meas_date'] is None:
        expected_bad_boundary_onset = ((last_samp - first_samp) /
                                       raw.info['sfreq'])
    else:
        expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    return raw
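
The heart of this helper is the save/reload round trip; as a standalone, hedged sketch (any readable FIF path can stand in for the placeholder):

import os.path as op

import mne
from numpy.testing import assert_allclose
from mne.utils import _TempDir

raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)  # placeholder
out_fname = op.join(_TempDir(), 'test_raw.fif')
raw.save(out_fname, overwrite=True, buffer_size_sec=1)
raw2 = mne.io.read_raw_fif(out_fname, preload=True)
assert_allclose(raw[:, :][0], raw2[:, :][0], rtol=1e-6, atol=1e-20)
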
Example no. 46
def test_render_add_sections():
    """Test adding figures/images to section.
    """
    from PIL import Image
    tempdir = _TempDir()
    import matplotlib.pyplot as plt
    report = Report(subjects_dir=subjects_dir)
    # Check add_figs_to_section functionality
    fig = plt.plot([1, 2], [1, 2])[0].figure
    report.add_figs_to_section(
        figs=fig,  # test non-list input
        captions=['evoked response'],
        scale=1.2,
        image_format='svg')
    assert_raises(ValueError,
                  report.add_figs_to_section,
                  figs=[fig, fig],
                  captions='H')
    assert_raises(ValueError,
                  report.add_figs_to_section,
                  figs=fig,
                  captions=['foo'],
                  scale=0,
                  image_format='svg')
    assert_raises(ValueError,
                  report.add_figs_to_section,
                  figs=fig,
                  captions=['foo'],
                  scale=1e-10,
                  image_format='svg')
    # need to recreate because calls above change size
    fig = plt.plot([1, 2], [1, 2])[0].figure

    # Check add_images_to_section with png and then gif
    img_fname = op.join(tempdir, 'testimage.png')
    fig.savefig(img_fname)
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])

    im = Image.open(img_fname)
    img_fname = op.join(tempdir, 'testimage.gif')
    im.save(img_fname)  # matplotlib does not support gif, so convert via PIL
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])

    assert_raises(ValueError,
                  report.add_images_to_section,
                  fnames=[img_fname, img_fname],
                  captions='H')

    assert_raises(ValueError,
                  report.add_images_to_section,
                  fnames=['foobar.xxx'],
                  captions='H')

    evoked = read_evokeds(evoked_fname,
                          condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    fig = plot_trans(evoked.info,
                     trans_fname,
                     subject='sample',
                     subjects_dir=subjects_dir)

    report.add_figs_to_section(
        figs=fig,  # test non-list input
        captions='random image',
        scale=1.2)
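
A hedged sketch of adding a matplotlib figure to a report with the (older) add_figs_to_section API used above:

import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

import mne

fig = plt.figure()
plt.plot([1, 2], [1, 2])
report = mne.Report()
report.add_figs_to_section(figs=fig, captions='example figure',
                           section='custom')
report.save('report_sections.html', open_browser=False, overwrite=True)
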
Example no. 47
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
            'FidT9 -6.711765       0.040402876     -3.251600355\n'
            'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
            'Cz 0       0       8.899186843',
        csd='// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
            '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
            'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
            'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
            'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
            'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
               'NumberPositions=    68\n'
               'Positions\n'
               '-86.0761 -19.9897 -47.9860\n'
               '85.7939 -20.0093 -48.0310\n'
               '0.0083 86.8110 -39.9830\n'
               '-86.0761 -24.9897 -67.9860\n'
               'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
              'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
              '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
              '.08 -.02 -.04\n'
              'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
            'Fp1  -92    -72\n'
            'Fp2   92     72\n'
            'very_very_very_long_name       -92     72\n'
            'O2        92    -90\n',
        elp='346\n'
            'EEG\t      F3\t -62.027\t -50.053\t      85\n'
            'EEG\t      Fz\t  45.608\t      90\t      85\n'
            'EEG\t      F4\t   62.01\t  50.103\t      85\n'
            'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
             'eeg AF7 -1 -1 -3\n'
             'eeg A3 -2 -2 2\n'
             'eeg A 0 0 0',
        bvef='<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
             '<!-- Generated by EasyCap Configurator 19.05.2014 -->\n'
             '<Electrodes defaults="false">\n'
             '  <Electrode>\n'
             '    <Name>Fp1</Name>\n'
             '    <Theta>-90</Theta>\n'
             '    <Phi>-72</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>1</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>Fz</Name>\n'
             '    <Theta>45</Theta>\n'
             '    <Phi>90</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>2</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>F3</Name>\n'
             '    <Theta>-60</Theta>\n'
             '    <Phi>-51</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>3</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>F7</Name>\n'
             '    <Theta>-90</Theta>\n'
             '    <Phi>-36</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>4</Number>\n'
             '  </Electrode>\n'
             '</Electrodes>',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
        bvef=[[-2.62664445e-02,  8.08398039e-02,  5.20474890e-18],
              [3.68031324e-18,  6.01040764e-02,  6.01040764e-02],
              [-4.63256329e-02,  5.72073923e-02,  4.25000000e-02],
              [-6.87664445e-02,  4.99617464e-02,  5.20474890e-18]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        unit = 'mm' if kind == 'bvef' else 'm'
        montage = read_montage(fname, unit=unit)
        if kind in ('sfp', 'txt'):
            assert ('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            # np.loadtxt has no skip_header kwarg; skiprows does the job
            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are reasonable distance from geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1,
                montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # For .bvef files, the unit must be "auto" or "mm"
    with pytest.raises(ValueError, match='be "auto" or "mm" for .bvef files.'):
        bvef_file = op.join(tempdir, 'test.' + 'bvef')
        read_montage(bvef_file, unit='m')

    # Test reading in different letter case.
    ch_names = ["F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3",
                "CPZ", "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = ["""
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    FidNz -91 0 -42
    FidT9 0 -91 -42
    FidT10 0 91 -42
    """]
    # sfp files seem to have Nz, T9, and T10 as fiducials:
    # https://github.com/mne-tools/mne-python/pull/4482#issuecomment-321980611

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, input_str in zip(kinds, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        assert_array_equal(montage.nasion[[0, 2]], [0, 0])
        assert_array_equal(montage.lpa[[1, 2]], [0, 0])
        assert_array_equal(montage.rpa[[1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test applying the montage to a newly created Info
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(
            montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(
            data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)

        # test return type as well as set montage
        assert (isinstance(evoked.set_montage(montage), type(evoked)))

        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # A warning should be raised when some EEG channels are not in the montage
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        with pytest.warns(RuntimeWarning, match='position specified'):
            _set_montage(info, montage)

    # Channel names can be matched case-insensitively
    info = create_info(['FP1', 'af7', 'AF3'], 1e3, ['eeg'] * 3)
    _set_montage(info, montage)

    # Unless there is a collision in names
    info = create_info(['FP1', 'Fp1', 'AF3'], 1e3, ['eeg'] * 3)
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage)
    assert len(info['dig']) == 5  # 2 EEG w/pos, 3 fiducials
    montage.ch_names = ['FP1', 'Fp1', 'AF3']
    info = create_info(['fp1', 'AF3'], 1e3, ['eeg', 'eeg'])
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage, set_dig=False)
    assert (info['dig'] is None)

    # test get_pos2d method
    montage = read_montage("standard_1020")
    c3 = montage.get_pos2d()[montage.ch_names.index("C3")]
    c4 = montage.get_pos2d()[montage.ch_names.index("C4")]
    fz = montage.get_pos2d()[montage.ch_names.index("Fz")]
    oz = montage.get_pos2d()[montage.ch_names.index("Oz")]
    f1 = montage.get_pos2d()[montage.ch_names.index("F1")]
    assert (c3[0] < 0)  # left hemisphere
    assert (c4[0] > 0)  # right hemisphere
    assert (fz[1] > 0)  # frontal
    assert (oz[1] < 0)  # occipital
    assert_allclose(fz[0], 0, atol=1e-2)  # midline
    assert_allclose(oz[0], 0, atol=1e-2)  # midline
    assert (f1[0] < 0 and f1[1] > 0)  # left frontal

    # test get_builtin_montages function
    montages = get_builtin_montages()
    assert (len(montages) > 0)  # MNE should always ship with montages
    assert ("standard_1020" in montages)  # 10/20 montage
    assert ("standard_1005" in montages)  # 10/05 montage
Example no. 48
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset, duration, description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    assert_and_remove_boundary_annot(raw_concat)
    # Ensure annotations survive the crop -> concatenate round-trip
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr,))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    assert_and_remove_boundary_annot(raw)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert annot_read.orig_time == raw.annotations.orig_time
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    annot.crop()  # test if cropping empty annotations doesn't raise an error
    # Test that empty annotations can be saved along with a Raw object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
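
A condensed sketch of the annotation round-trip that test_crop checks; fif_fname stands in for any raw FIF recording:

import mne

raw = mne.io.read_raw_fif(fif_fname)
annot = mne.Annotations(onset=[1.0], duration=[0.5],
                        description=['bad segment'],
                        orig_time=raw.info['meas_date'])
raw.set_annotations(annot)

# Cropping keeps only the annotations overlapping the retained time range;
# concatenating the two halves restores the original annotation timing
# (plus an automatically added boundary annotation).
mid = raw.times[-1] / 2.
left = raw.copy().crop(0., mid)
right = raw.copy().crop(mid + 1. / raw.info['sfreq'], None)
restored = mne.concatenate_raws([left, right])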
Example no. 49
def test_io_forward():
    """Test IO for forward solutions
    """
    temp_dir = _TempDir()
    # do extensive tests with MEEG + grad
    n_channels, n_src = 366, 108
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(isinstance(fwd, Forward))
    fwd = read_forward_solution(fname_meeg_grad)
    fwd = convert_forward_solution(fwd, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    fname_temp = op.join(temp_dir, 'test-fwd.fif')
    write_forward_solution(fname_temp, fwd, overwrite=True)

    fwd = read_forward_solution(fname_meeg_grad)
    fwd = convert_forward_solution(fwd, surf_ori=True)
    fwd_read = read_forward_solution(fname_temp)
    fwd_read = convert_forward_solution(fwd_read, surf_ori=True)
    leadfield = fwd_read['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
    assert_equal(len(fwd_read['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd_read['info'])
    assert_true('mri_head_t' in fwd_read)
    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])

    fwd = read_forward_solution(fname_meeg)
    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                   use_cps=False)
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
                                        force_fixed=True, use_cps=False)
    assert_true(repr(fwd_read))
    assert_true(isinstance(fwd_read, Forward))
    assert_true(is_fixed_orient(fwd_read))
    compare_forwards(fwd, fwd_read)

    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                   use_cps=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, 1494 // 3))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    assert_equal(len(fwd['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
    assert_true(fwd['surf_ori'])
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
                                        force_fixed=True, use_cps=True)
    assert_true(repr(fwd_read))
    assert_true(isinstance(fwd_read, Forward))
    assert_true(is_fixed_orient(fwd_read))
    compare_forwards(fwd, fwd_read)

    fwd = read_forward_solution(fname_meeg_grad)
    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                   use_cps=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src // 3))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    assert_equal(len(fwd['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
    assert_true(fwd['surf_ori'])
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
                                        force_fixed=True, use_cps=True)
    assert_true(repr(fwd_read))
    assert_true(isinstance(fwd_read, Forward))
    assert_true(is_fixed_orient(fwd_read))
    compare_forwards(fwd, fwd_read)

    # test warnings on bad filenames
    fwd = read_forward_solution(fname_meeg_grad)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
        write_forward_solution(fwd_badname, fwd)
        read_forward_solution(fwd_badname)
    assert_naming(w, 'test_forward.py', 2)

    fwd = read_forward_solution(fname_meeg)
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    compare_forwards(fwd, fwd_read)
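
As a quick reference, a minimal sketch of the forward-solution round-trip the test above performs repeatedly; fname_fwd is a placeholder for an existing *-fwd.fif file:

import mne

fwd = mne.read_forward_solution(fname_fwd)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                         use_cps=True)
mne.write_forward_solution('test-fwd.fif', fwd_fixed, overwrite=True)

# Reading back and re-converting should reproduce the same fixed-orientation
# leadfield (this is what compare_forwards verifies above).
fwd_read = mne.convert_forward_solution(
    mne.read_forward_solution('test-fwd.fif'),
    surf_ori=True, force_fixed=True, use_cps=True)
assert fwd_read['sol']['data'].shape == fwd_fixed['sol']['data'].shape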
Example no. 50
def test_restrict_forward_to_label():
    """Test restriction of source space to label
    """
    fwd = read_forward_solution(fname_meeg)
    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                   use_cps=True)
    fwd = pick_types_forward(fwd, meg=True)

    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))

    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])

    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]

    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    src_sel_rh += nuse_lh

    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)

    fwd = read_forward_solution(fname_meeg)
    fwd = pick_types_forward(fwd, meg=True)

    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))

    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])

    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]

    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    src_sel_rh += nuse_lh

    assert_equal(fwd_out['sol']['ncol'],
                 3 * (len(src_sel_lh) + len(src_sel_rh)))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)

    # Test saving the restricted forward object. This only works if all fields
    # are properly accounted for.
    temp_dir = _TempDir()
    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
    write_forward_solution(fname_copy, fwd_out, overwrite=True)
    fwd_out_read = read_forward_solution(fname_copy)
    compare_forwards(fwd_out, fwd_out_read)
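
A minimal sketch of restricting a forward solution to labels, assuming fname_fwd and label_dir point at an existing forward file and a directory of FreeSurfer .label files:

import os.path as op
import mne
from mne.forward import restrict_forward_to_label

fwd = mne.read_forward_solution(fname_fwd)
label = mne.read_label(op.join(label_dir, 'Aud-lh.label'))
fwd_label = restrict_forward_to_label(fwd, [label])

# Only source-space columns for vertices inside the label remain.
assert fwd_label['sol']['ncol'] <= fwd['sol']['ncol']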
Example no. 51
def _test_raw_reader(reader, test_preloading=True, test_kwargs=True,
                     boundary_decimal=2, test_scaling=True, test_rank=True,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, preloaded,
        non-preloaded, and memory-mapped reads are all tested.
    test_kwargs : bool
        Whether to test that the reader can be re-created from
        ``raw._init_kwargs``.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec *
                            raw.info['sfreq'])), raw.n_times)
        slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None), slice(1, bnd)]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                       slice(0, bnd + 100)]
        other_raws = [reader(preload=buffer_fname, **kwargs),
                      reader(preload=False, **kwargs)]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)

        # test projection vs cals and data units
        other_raw = reader(preload=False, **kwargs)
        other_raw.del_proj()
        eeg = meg = fnirs = False
        if 'eeg' in raw:
            eeg, atol = True, 1e-18
        elif 'grad' in raw:
            meg, atol = 'grad', 1e-24
        elif 'mag' in raw:
            meg, atol = 'mag', 1e-24
        else:
            assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?'
            fnirs, atol = 'fnirs_cw_amplitude', 1e-10
        picks = pick_types(
            other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs)
        col_names = [other_raw.ch_names[pick] for pick in picks]
        proj = np.ones((1, len(picks)))
        proj /= proj.shape[1]
        proj = Projection(
            data=dict(data=proj, nrow=1, row_names=None,
                      col_names=col_names, ncol=len(picks)),
            active=False)
        assert len(other_raw.info['projs']) == 0
        other_raw.add_proj(proj)
        assert len(other_raw.info['projs']) == 1
        # The following orders of projector application, data loading, and
        # reordering should be equivalent:
        # 1. load->apply->get
        data_load_apply_get = \
            other_raw.copy().load_data().apply_proj().get_data(picks)
        # 2. apply->get (and don't allow apply->pick)
        apply = other_raw.copy().apply_proj()
        data_apply_get = apply.get_data(picks)
        data_apply_get_0 = apply.get_data(picks[0])[0]
        with pytest.raises(RuntimeError, match='loaded'):
            apply.copy().pick(picks[0]).get_data()
        # 3. apply->load->get
        data_apply_load_get = apply.copy().load_data().get_data(picks)
        data_apply_load_get_0, data_apply_load_get_1 = \
            apply.copy().load_data().pick(picks[:2]).get_data()
        # 4. reorder->apply->load->get
        all_picks = np.arange(len(other_raw.ch_names))
        reord = np.concatenate((
            picks[1::2],
            picks[0::2],
            np.setdiff1d(all_picks, picks)))
        rev = np.argsort(reord)
        assert_array_equal(reord[rev], all_picks)
        assert_array_equal(rev[reord], all_picks)
        reorder = other_raw.copy().pick(reord)
        assert reorder.ch_names == [other_raw.ch_names[r] for r in reord]
        assert reorder.ch_names[0] == other_raw.ch_names[picks[1]]
        assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1]))
        reorder_apply = reorder.copy().apply_proj()
        assert reorder_apply.ch_names == reorder.ch_names
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert_allclose(reorder_apply.get_data([0]), apply.get_data(picks[1]),
                        atol=1e-18)
        data_reorder_apply_load_get = \
            reorder_apply.load_data().get_data(rev[:len(picks)])
        data_reorder_apply_load_get_1 = \
            reorder_apply.copy().load_data().pick([0]).get_data()[0]
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert (data_load_apply_get.shape ==
                data_apply_get.shape ==
                data_apply_load_get.shape ==
                data_reorder_apply_load_get.shape)
        del apply
        # first check that our data are (probably) in the right units
        data = data_load_apply_get.copy()
        data = data - np.mean(data, axis=1, keepdims=True)  # can be offsets
        np.abs(data, out=data)
        if test_scaling:
            maxval = atol * 1e16
            assert_array_less(data, maxval)
            minval = atol * 1e6
            assert_array_less(minval, np.median(data))
        else:
            atol = 1e-7 * np.median(data)  # 1e-7 * MAD
        # ranks should all be reduced by 1
        if test_rank == 'less':
            cmp = np.less
        else:
            cmp = np.equal
        rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get)
        rank_apply_get = np.linalg.matrix_rank(data_apply_get)
        rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get)
        assert cmp(rank_load_apply_get, len(col_names) - 1)
        assert cmp(rank_apply_get, len(col_names) - 1)
        assert cmp(rank_apply_load_get, len(col_names) - 1)
        # and they should all match
        t_kw = dict(
            atol=atol, err_msg='before != after, likely _mult_cal_one prob')
        assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw)
        assert_allclose(data_apply_load_get_1,
                        data_reorder_apply_load_get_1, **t_kw)
        assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_get, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw)
        if 'eeg' in raw:
            other_raw.del_proj()
            direct = \
                other_raw.copy().load_data().set_eeg_reference().get_data()
            other_raw.set_eeg_reference(projection=True)
            assert len(other_raw.info['projs']) == 1
            this_proj = other_raw.info['projs'][0]['data']
            assert this_proj['col_names'] == col_names
            assert this_proj['data'].shape == proj['data']['data'].shape
            assert_allclose(this_proj['data'], proj['data']['data'])
            proj = other_raw.apply_proj().get_data()
            assert_allclose(proj[picks], data_load_apply_get, atol=1e-10)
            assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg'])
    else:
        raw = reader(**kwargs)
    assert_named_constants(raw.info)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)

    # Test saving with an incorrect extension
    out_fname_h5 = op.join(tempdir, 'test_raw.h5')
    with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'):
        raw.save(out_fname_h5)

    raw3 = read_raw_fif(out_fname)
    assert_named_constants(raw3.info)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(),)
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(times, times_2,)
        assert_array_equal(data[picks_2], data_2)

    # Make sure that writing info to HDF5 works
    # (all fields should be compatible)
    if check_version('h5py'):
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''

    # Make sure that changing directory does not break anything
    if test_preloading:
        these_kwargs = kwargs.copy()
        key = None
        for key in ('fname',
                    'input_fname',  # artemis123
                    'vhdr_fname',  # BV
                    'pdf_fname',  # BTi
                    'directory',  # CTF
                    'filename',  # nedf
                    ):
            try:
                fname = kwargs[key]
            except KeyError:
                key = None
            else:
                break
        # len(kwargs) == 0 for the fake arange reader
        if len(kwargs):
            assert key is not None, sorted(kwargs.keys())
            dirname = op.dirname(fname)
            these_kwargs[key] = op.basename(fname)
            these_kwargs['preload'] = False
            orig_dir = os.getcwd()
            try:
                os.chdir(dirname)
                raw_chdir = reader(**these_kwargs)
            finally:
                os.chdir(orig_dir)
            raw_chdir.load_data()

    return raw
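
A hypothetical format-specific test might delegate to this helper roughly as follows; read_raw_edf is only one possible reader and edf_path is a placeholder path:

from mne.io import read_raw_edf

def test_my_edf_reader():
    # The helper forwards keyword arguments straight to the reader.
    raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
                           boundary_decimal=2)
    assert raw.n_times > 0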
Example no. 52
def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 10e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False,
                                   force_fixed=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [
        np.sort(rng.permutation(s['vertno'])[:n_per_hemi]) for s in fwd['src']
    ]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd,
                             stc,
                             evoked.info,
                             cov,
                             nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(
        np.concatenate([
            pick_types(evoked.info, meg=True, eeg=False)[::2],
            pick_types(evoked.info, meg=False, eeg=True)[::2]
        ]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit',
        '--meas',
        fname_sim,
        '--meg',
        '--eeg',
        '--noise',
        fname_cov,
        '--dip',
        fname_dtemp,
        '--mri',
        fname_fwd,
        '--reg',
        '0',
        '--tmin',
        '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data**2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals**2, axis=0))
    factor = 1.
    # XXX weird, inexplicable difference for the Python 3.5 build; we'll
    # assume it is due to an Anaconda bug for now...
    if os.getenv('TRAVIS', 'false') == 'true' and \
            sys.version[:3] in ('3.5', '2.7'):
        factor = 0.8
    assert_true((data_rms > factor * resi_rms).all(),
                msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert_true(dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did at least as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [
            180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori, axis=1)))
        ]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude)**2))]
        gofs += [np.mean(d.gof)]
    assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
    assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
    assert_true(gc_dists[0] >= gc_dists[1] * factor,
                'gc-dists (ori): %s' % gc_dists)
    assert_true(amp_errs[0] >= amp_errs[1] * factor,
                'amplitude errors: %s' % amp_errs)
    assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
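
A stripped-down sketch of the MNE-Python dipole fit exercised above; fname_evo and fname_cov are placeholder paths, and the sphere model mirrors the one used in the test:

import mne

evoked = mne.read_evokeds(fname_evo, condition=0)
cov = mne.read_cov(fname_cov)
sphere = mne.make_sphere_model(head_radius=0.1)

# Fit one dipole per time point; residual holds whatever the dipoles
# cannot explain.
dip, residual = mne.fit_dipole(evoked, cov, sphere)
print(dip.pos.shape, dip.gof.mean())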
Example no. 53
def test_coreg_model():
    """Test CoregModel."""
    from mne.gui._coreg_gui import CoregModel
    tempdir = _TempDir()
    trans_dst = op.join(tempdir, 'test-trans.fif')

    model = CoregModel()
    pytest.raises(RuntimeError, model.save_trans, 'blah.fif')

    model.mri.subjects_dir = subjects_dir
    model.mri.subject = 'sample'

    assert not model.mri.fid_ok
    model.mri.lpa = [[-0.06, 0, 0]]
    model.mri.nasion = [[0, 0.05, 0]]
    model.mri.rpa = [[0.08, 0, 0]]
    assert (model.mri.fid_ok)

    model.hsp.file = raw_path
    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
    assert model.has_lpa_data
    assert model.has_nasion_data
    assert model.has_rpa_data
    assert len(model.hsp.eeg_points) > 1

    assert len(model.mri.bem_low_res.surf.rr) == 2562
    assert len(model.mri.bem_high_res.surf.rr) == 267122

    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)

    model.nasion_weight = 1.
    model.fit_fiducials(0)
    old_x = lpa_distance**2 + rpa_distance**2 + nasion_distance**2
    new_x = (model.lpa_distance**2 + model.rpa_distance**2 +
             model.nasion_distance**2)
    assert new_x < old_x

    model.fit_icp(0)
    new_dist = np.mean(model.point_distance)
    assert new_dist < avg_point_distance

    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)

    # test restoring trans
    x, y, z = 100, 200, 50
    rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
    model.trans_x = x
    model.trans_y = y
    model.trans_z = z
    model.rot_x = rot_x
    model.rot_y = rot_y
    model.rot_z = rot_z
    trans = model.mri_head_t
    model.reset_traits(
        ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"])
    assert_equal(model.trans_x, 0)
    model.set_trans(trans)
    assert_array_almost_equal(model.trans_x, x)
    assert_array_almost_equal(model.trans_y, y)
    assert_array_almost_equal(model.trans_z, z)
    assert_array_almost_equal(model.rot_x, rot_x)
    assert_array_almost_equal(model.rot_y, rot_y)
    assert_array_almost_equal(model.rot_z, rot_z)

    # info
    assert (isinstance(model.fid_eval_str, string_types))
    assert (isinstance(model.points_eval_str, string_types))

    # scaling job
    assert not model.can_prepare_bem_model
    model.n_scale_params = 1
    assert (model.can_prepare_bem_model)
    model.prepare_bem_model = True
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', False)
    assert_equal(sdir, subjects_dir)
    assert_equal(sfrom, 'sample')
    assert_equal(sto, 'sample2')
    assert_allclose(scale, model.parameters[6:9])
    assert_equal(skip_fiducials, False)
    # find BEM files
    bems = set()
    for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
        match = re.match(r'sample-(.+-bem)\.fif', fname)
        if match:
            bems.add(match.group(1))
    assert_equal(set(bemsol), bems)
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', True)
    assert_equal(bemsol, [])
    assert (skip_fiducials)

    model.load_trans(fname_trans)
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)
    assert_allclose(
        invert_transform(trans)['trans'][:3, 3] * 1000.,
        [model.trans_x, model.trans_y, model.trans_z])
Example no. 54
def test_handle_eeg_coords_reading():
    """Test reading iEEG coordinates from BIDS files."""
    bids_root = _TempDir()

    bids_path = BIDSPath(subject=subject_id,
                         session=session_id,
                         run=run,
                         acquisition=acq,
                         task=task,
                         root=bids_root)

    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    raw = _read_raw_edf(raw_fname)

    # ensure we are writing 'eeg' data
    raw.set_channel_types({ch: 'eeg' for ch in raw.ch_names})

    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))

    # create montage in 'unknown' coordinate frame
    # and assert coordsystem/electrodes sidecar tsv don't exist
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="unknown")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning, match="Skipping EEG electrodes.tsv"):
        write_raw_bids(raw, bids_path, overwrite=True)

    bids_path.update(root=bids_root)
    coordsystem_fname = _find_matching_sidecar(bids_path,
                                               suffix='coordsystem',
                                               extension='.json',
                                               on_error='warn')
    electrodes_fname = _find_matching_sidecar(bids_path,
                                              suffix='electrodes',
                                              extension='.tsv',
                                              on_error='warn')
    assert coordsystem_fname is None
    assert electrodes_fname is None

    # creating a montage in the head frame and setting it should warn
    # if the anatomical landmarks are not set
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="head")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning,
                      match='Setting montage not possible '
                      'if anatomical landmarks'):
        write_raw_bids(raw, bids_path, overwrite=True)

    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="head",
                                            nasion=[1, 0, 0],
                                            lpa=[0, 1, 0],
                                            rpa=[0, 0, 1])
    raw.set_montage(montage)
    write_raw_bids(raw, bids_path, overwrite=True)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_path, verbose=True)
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # modify coordinate frame to not-captrak
    coordsystem_fname = _find_matching_sidecar(bids_path,
                                               suffix='coordsystem',
                                               extension='.json')
    _update_sidecar(coordsystem_fname, 'EEGCoordinateSystem', 'besa')
    with pytest.warns(RuntimeWarning,
                      match='EEG Coordinate frame is not '
                      'accepted BIDS keyword'):
        raw_test = read_raw_bids(bids_path)
        assert raw_test.info['dig'] is None
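
A minimal sketch of writing and reading EEG electrode coordinates through mne-bids, as the test above does; raw_fname and bids_root are placeholders and the electrode positions are random:

import numpy as np
import mne
from mne_bids import BIDSPath, write_raw_bids, read_raw_bids

raw = mne.io.read_raw_edf(raw_fname)
raw.set_channel_types({ch: 'eeg' for ch in raw.ch_names})

# Head-frame montage with explicit anatomical landmarks, so the
# electrodes.tsv and coordsystem.json sidecars get written.
ch_pos = dict(zip(raw.ch_names, np.random.random((len(raw.ch_names), 3))))
montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame='head',
                                        nasion=[1, 0, 0], lpa=[0, 1, 0],
                                        rpa=[0, 0, 1])
raw.set_montage(montage)

bids_path = BIDSPath(subject='01', session='01', task='rest', root=bids_root)
write_raw_bids(raw, bids_path, overwrite=True)
raw_back = read_raw_bids(bids_path)  # montage restored from the sidecars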
Example no. 55
def test_kit2fiff_model():
    """Test CombineMarkersModel Traits Model."""
    from mne.gui._kit2fiff_gui import Kit2FiffModel, Kit2FiffPanel
    tempdir = _TempDir()
    tgt_fname = os.path.join(tempdir, 'test-raw.fif')

    model = Kit2FiffModel()
    assert_false(model.can_save)
    assert_equal(model.misc_chs_desc, "No SQD file selected...")
    assert_equal(model.stim_chs_comment, "")
    model.markers.mrk1.file = mrk_pre_path
    model.markers.mrk2.file = mrk_post_path
    model.sqd_file = sqd_path
    assert_equal(model.misc_chs_desc, "160:192")
    model.hsp_file = hsp_path
    assert_false(model.can_save)
    model.fid_file = fid_path
    assert_true(model.can_save)

    # events
    model.stim_slope = '+'
    assert_equal(model.get_event_info(), {1: 2})
    model.stim_slope = '-'
    assert_equal(model.get_event_info(), {254: 2, 255: 2})

    # stim channels
    model.stim_chs = "181:184, 186"
    assert_array_equal(model.stim_chs_array, [181, 182, 183, 186])
    assert_true(model.stim_chs_ok)
    assert_equal(model.get_event_info(), {})
    model.stim_chs = "181:184, bad"
    assert_false(model.stim_chs_ok)
    assert_false(model.can_save)
    model.stim_chs = ""
    assert_true(model.can_save)

    # export raw
    raw_out = model.get_raw()
    raw_out.save(tgt_fname)
    raw = read_raw_fif(tgt_fname, add_eeg_ref=False)

    # Compare exported raw with the original binary conversion
    raw_bin = read_raw_fif(fif_path, add_eeg_ref=False)
    trans_bin = raw.info['dev_head_t']['trans']
    want_keys = list(raw_bin.info.keys())
    assert_equal(sorted(want_keys), sorted(list(raw.info.keys())))
    trans_transform = raw_bin.info['dev_head_t']['trans']
    assert_allclose(trans_transform, trans_bin, 0.1)

    # Averaging markers
    model.markers.mrk3.method = "Average"
    trans_avg = model.dev_head_trans
    assert_false(np.all(trans_avg == trans_transform))
    assert_allclose(trans_avg, trans_bin, 0.1)

    # Test exclusion of one marker
    model.markers.mrk3.method = "Transform"
    model.use_mrk = [1, 2, 3, 4]
    assert_false(np.all(model.dev_head_trans == trans_transform))
    assert_false(np.all(model.dev_head_trans == trans_avg))
    assert_false(np.all(model.dev_head_trans == np.eye(4)))

    # test setting stim channels
    model.stim_slope = '+'
    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')

    model.stim_coding = '<'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)

    events_rev = events_bin.copy()
    events_rev[:, 2] = 1
    model.stim_coding = '>'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_rev)

    model.stim_coding = 'channel'
    model.stim_chs = "160:161"
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin + [0, 0, 32])

    # test reset
    model.clear_all()
    assert_equal(model.use_mrk, [0, 1, 2, 3, 4])
    assert_equal(model.sqd_file, "")

    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    try:
        with warnings.catch_warnings(record=True):  # traits warnings
            warnings.simplefilter('always')
            Kit2FiffPanel()
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']
Example no. 56
def test_handle_ieeg_coords_reading(bids_path):
    """Test reading iEEG coordinates from BIDS files."""
    bids_root = _TempDir()

    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    bids_fname = bids_path.copy().update(datatype='ieeg',
                                         suffix='ieeg',
                                         extension='.edf',
                                         root=bids_root)
    raw = _read_raw_edf(raw_fname)

    # ensure we are writing 'ecog'/'ieeg' data
    raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})

    # coordinate frames in mne-python should all map correctly
    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))
    coordinate_frames = ['mri', 'ras']
    for coord_frame in coordinate_frames:
        # XXX: mne-bids doesn't support multiple electrodes.tsv files
        sh.rmtree(bids_root)
        montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                                coord_frame=coord_frame)
        raw.set_montage(montage)
        write_raw_bids(raw, bids_fname, overwrite=True, verbose=False)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are correct coordinate frames
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
        coord_frame_int = MNE_STR_TO_FRAME[coord_frame]
        for digpoint in raw_test.info['dig']:
            assert digpoint['coord_frame'] == coord_frame_int

    # start w/ new bids root
    sh.rmtree(bids_root)
    write_raw_bids(raw, bids_fname, overwrite=True, verbose=False)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
    orig_locs = raw.info['dig'][1]
    test_locs = raw_test.info['dig'][1]
    assert orig_locs == test_locs
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # read in the data and assert montage is the same
    # regardless of 'm', 'cm', 'mm', or 'pixel'
    scalings = {'m': 1, 'cm': 100, 'mm': 1000}
    bids_fname.update(root=bids_root)
    coordsystem_fname = _find_matching_sidecar(bids_fname,
                                               suffix='coordsystem',
                                               extension='.json')
    electrodes_fname = _find_matching_sidecar(bids_fname,
                                              suffix='electrodes',
                                              extension='.tsv')
    orig_electrodes_dict = _from_tsv(electrodes_fname,
                                     [str, float, float, float, str])

    # units not specified by BIDS should not be read
    coord_unit = 'km'
    scaling = 0.001
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
    electrodes_dict = _from_tsv(electrodes_fname,
                                [str, float, float, float, str])
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis] = \
            np.multiply(orig_electrodes_dict[axis], scaling)
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.warns(RuntimeWarning,
                      match='Coordinate unit is not '
                      'an accepted BIDS unit'):
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

    # correct BIDS units should scale to meters properly
    for coord_unit, scaling in scalings.items():
        # update coordinate SI units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
        electrodes_dict = _from_tsv(electrodes_fname,
                                    [str, float, float, float, str])
        for axis in ['x', 'y', 'z']:
            electrodes_dict[axis] = \
                np.multiply(orig_electrodes_dict[axis], scaling)
        _to_tsv(electrodes_dict, electrodes_fname)

        # read in raw file w/ updated montage
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

        # obtain the sensor positions and make sure they're the same
        assert_dig_allclose(raw.info, raw_test.info)

    # XXX: Improve by changing names to 'unknown' coordframe (needs mne PR)
    # check that coordinate systems other than the accepted BIDS keywords
    # should be named in the file and not in CoordinateSystem, which is
    # reserved for keywords
    coordinate_frames = ['lia', 'ria', 'lip', 'rip', 'las']
    for coord_frame in coordinate_frames:
        # update coordinate units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', coord_frame)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are MRI coordinate frame
        with pytest.warns(RuntimeWarning,
                          match="iEEG Coordinate frame is "
                          "not accepted BIDS keyword"):
            raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
            assert raw_test.info['dig'] is None

    # ACPC should be read in as RAS for iEEG
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', 'acpc')
    raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
    coord_frame_int = MNE_STR_TO_FRAME['ras']
    for digpoint in raw_test.info['dig']:
        assert digpoint['coord_frame'] == coord_frame_int

    # if we delete the coordsystem.json file, an error will be raised
    os.remove(coordsystem_fname)
    with pytest.raises(RuntimeError,
                       match='BIDS mandates that '
                       'the coordsystem.json'):
        raw = read_raw_bids(bids_path=bids_fname, verbose=False)

    # test error message if electrodes don't match
    bids_path.update(root=bids_root)
    write_raw_bids(raw, bids_path, overwrite=True)
    electrodes_dict = _from_tsv(electrodes_fname)
    # pop off 5 channels
    for key in electrodes_dict.keys():
        for i in range(5):
            electrodes_dict[key].pop()
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.raises(RuntimeError, match='Channels do not correspond'):
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

    # make sure montage is set if there are coordinates w/ 'n/a'
    raw.info['bads'] = []
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)
    electrodes_dict = _from_tsv(electrodes_fname)
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis][0] = 'n/a'
        electrodes_dict[axis][3] = 'n/a'
    _to_tsv(electrodes_dict, electrodes_fname)

    # test if montage is correctly set via mne-bids
    # electrode coordinates should be nan
    # when coordinate is 'n/a'
    nan_chs = [electrodes_dict['name'][i] for i in [0, 3]]
    with pytest.warns(RuntimeWarning,
                      match='There are channels '
                      'without locations'):
        raw = read_raw_bids(bids_path=bids_fname, verbose=False)
        for idx, ch in enumerate(raw.info['chs']):
            if ch['ch_name'] in nan_chs:
                assert all(np.isnan(ch['loc'][:3]))
            else:
                assert not any(np.isnan(ch['loc'][:3]))
            assert ch['ch_name'] not in raw.info['bads']
Example no. 57
def test_coreg_model_with_fsaverage():
    """Test CoregModel with the fsaverage brain data."""
    tempdir = _TempDir()
    from mne.gui._coreg_gui import CoregModel

    mne.create_default_subject(subjects_dir=tempdir,
                               fs_home=op.join(subjects_dir, '..'))

    model = CoregModel()
    model.mri.subjects_dir = tempdir
    model.mri.subject = 'fsaverage'
    assert (model.mri.fid_ok)

    model.hsp.file = raw_path
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)

    # test hsp point omission
    model.nasion_weight = 1.
    model.trans_y = -0.008
    model.fit_fiducials(0)
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(np.inf)
    assert model.hsp.n_omitted == 0
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.005)
    assert model.hsp.n_omitted == 40
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1

    # scale with 1 parameter
    model.n_scale_params = 1
    model.fit_fiducials(1)
    old_x = lpa_distance**2 + rpa_distance**2 + nasion_distance**2
    new_x = (model.lpa_distance**2 + model.rpa_distance**2 +
             model.nasion_distance**2)
    assert (new_x < old_x)

    model.fit_icp(1)
    avg_point_distance_1param = np.mean(model.point_distance)
    assert (avg_point_distance_1param < avg_point_distance)

    # scaling job
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert_equal(sdir, tempdir)
    assert_equal(sfrom, 'fsaverage')
    assert_equal(sto, 'scaled')
    assert_allclose(scale, model.parameters[6:9])
    assert_equal(set(bemsol), set(('inner_skull-bem', )))
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert_equal(bemsol, [])

    # scale with 3 parameters
    model.n_scale_params = 3
    model.fit_icp(3)
    assert (np.mean(model.point_distance) < avg_point_distance_1param)

    # test switching raw disables point omission
    assert_equal(model.hsp.n_omitted, 1)
    model.hsp.file = kit_raw_path
    assert_equal(model.hsp.n_omitted, 0)
Example no. 58
def test_get_head_mri_trans():
    """Test getting a trans object from BIDS data."""
    import nibabel as nib

    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Smiley': 5,
        'Button': 32
    }
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    # Drop unknown events.
    events = mne.read_events(events_fname)
    events = events[events[:, 2] != 0]

    # Write it to BIDS
    raw = _read_raw_fif(raw_fname)
    bids_root = _TempDir()
    bids_path = _bids_path.copy().update(root=bids_root)
    write_raw_bids(raw,
                   bids_path,
                   events_data=events,
                   event_id=event_id,
                   overwrite=False)

    # We cannot recover trans if no MRI has been written yet
    with pytest.raises(RuntimeError):
        estimated_trans = get_head_mri_trans(bids_path=bids_path)

    # Write some MRI data and supply a `trans` so that a sidecar gets written
    trans = mne.read_trans(raw_fname.replace('_raw.fif', '-trans.fif'))

    # Get the T1 weighted MRI data file ... test write_anat with a nibabel
    # image instead of a file path
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
    t1w_mgh = nib.load(t1w_mgh)

    t1w_bidspath = BIDSPath(subject=subject_id,
                            session=session_id,
                            acquisition=acq,
                            root=bids_root)
    t1w_bidspath = write_anat(t1w_mgh,
                              bids_path=t1w_bidspath,
                              raw=raw,
                              trans=trans,
                              verbose=True)
    anat_dir = t1w_bidspath.directory

    # Try to get trans back through fitting points
    estimated_trans = get_head_mri_trans(bids_path=bids_path)

    assert trans['from'] == estimated_trans['from']
    assert trans['to'] == estimated_trans['to']
    assert_almost_equal(trans['trans'], estimated_trans['trans'])
    print(trans)
    print(estimated_trans)

    # provoke an error by introducing NaNs into the digitized landmark coords
    with pytest.raises(RuntimeError, match='AnatomicalLandmarkCoordinates'):
        raw.info['dig'][0]['r'] = np.ones(3) * np.nan
        sh.rmtree(anat_dir)
        bids_path = write_anat(t1w_mgh,
                               bids_path=t1w_bidspath,
                               raw=raw,
                               trans=trans,
                               verbose=True)
        estimated_trans = get_head_mri_trans(bids_path=bids_path)
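
Stripped of the error handling, the round trip above reduces to a short workflow: write the
recording and the T1 with write_raw_bids/write_anat, then recover the transform from the
landmarks stored in the sidecar files. A minimal sketch, assuming a BIDS dataset that
already contains both pieces (the entities and root are placeholders):

from mne_bids import BIDSPath, get_head_mri_trans

bids_path = BIDSPath(subject='01', session='01', task='audvis',
                     root='/path/to/bids_root')  # hypothetical dataset
# Estimate the head->MRI transform from the digitized and MRI landmarks
estimated_trans = get_head_mri_trans(bids_path=bids_path)
print(estimated_trans)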
Example no. 59
def test_coreg_gui():
    """Test CoregFrame."""
    _check_ci()
    home_dir = _TempDir()
    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    os.environ['_MNE_FAKE_HOME_DIR'] = home_dir
    try:
        pytest.raises(ValueError,
                      mne.gui.coregistration,
                      subject='Elvis',
                      subjects_dir=subjects_dir)

        from pyface.api import GUI
        from tvtk.api import tvtk
        gui = GUI()

        # avoid modal dialog if SUBJECTS_DIR is set to a directory that
        # does not contain valid subjects
        ui, frame = mne.gui.coregistration(subjects_dir='')

        frame.model.mri.subjects_dir = subjects_dir
        frame.model.mri.subject = 'sample'

        assert not frame.model.mri.fid_ok
        frame.model.mri.lpa = [[-0.06, 0, 0]]
        frame.model.mri.nasion = [[0, 0.05, 0]]
        frame.model.mri.rpa = [[0.08, 0, 0]]
        assert (frame.model.mri.fid_ok)
        frame.data_panel.raw_src.file = raw_path
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.SphereSource)
        frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.CylinderSource)

        # grow hair (faster for low-res)
        assert frame.data_panel.view_options_panel.head_high_res
        frame.data_panel.view_options_panel.head_high_res = False
        frame.model.grow_hair = 40.

        # scale
        frame.coreg_panel.n_scale_params = 3
        frame.coreg_panel.scale_x_inc = True
        assert frame.model.scale_x == 101.
        frame.coreg_panel.scale_y_dec = True
        assert frame.model.scale_y == 99.

        # reset parameters
        frame.coreg_panel.reset_params = True
        assert_equal(frame.model.grow_hair, 0)
        assert not frame.data_panel.view_options_panel.head_high_res

        # configuration persistence
        assert (frame.model.prepare_bem_model)
        frame.model.prepare_bem_model = False
        frame.save_config(home_dir)
        ui.dispose()
        gui.process_events()

        ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir)
        assert not frame.model.prepare_bem_model
        assert not frame.data_panel.view_options_panel.head_high_res
        ui.dispose()
        gui.process_events()
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']
        del os.environ['_MNE_FAKE_HOME_DIR']
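
Outside the test harness the same GUI is launched directly; a minimal sketch, assuming a
FreeSurfer subjects directory and a raw file with digitized head points (both paths are
placeholders):

import mne

# Open the coregistration GUI for the 'sample' subject,
# loading digitization points from the given raw file
mne.gui.coregistration(subject='sample',
                       subjects_dir='/path/to/subjects',
                       inst='/path/to/sample_audvis_raw.fif')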
Example no. 60
def test_read_ctf():
    """Test CTF reader."""
    temp_dir = _TempDir()
    out_fname = op.join(temp_dir, 'test_py_raw.fif')

    # Create a dummy .eeg file so we can test our reading/application of it
    os.mkdir(op.join(temp_dir, 'randpos'))
    ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch)
    shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname)
    with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
        raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname)
    picks = pick_types(raw.info, meg=False, eeg=True)
    pos = np.random.RandomState(42).randn(len(picks), 3)
    fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg')
    # Create a bad file
    with open(fake_eeg_fname, 'wb') as fid:
        fid.write('foo\n'.encode('ascii'))
    pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname)
    # Create a good file
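    # Each line of the .eeg position file is tab-separated:
    # channel number, channel name, then x, y, z in cm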
    with open(fake_eeg_fname, 'wb') as fid:
        for ii, ch_num in enumerate(picks):
            args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple(
                '%0.5f' % x for x in 100 * pos[ii])  # convert to cm
            fid.write(('\t'.join(args) + '\n').encode('ascii'))
    pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
    with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
        raw = read_raw_ctf(ctf_eeg_fname)  # read modified data
    pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
    assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read,
                    rtol=1e-5, atol=1e-5)
    assert (pos_read == pos_read_old).mean() < 0.1
    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'),
                op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif'))

    # Create a version with no hc, starting out *with* EEG pos (error)
    os.mkdir(op.join(temp_dir, 'no_hc'))
    ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch)
    shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname)
    remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3]))
    os.remove(remove_base + '.hc')
    with pytest.warns(RuntimeWarning, match='MISC channel'):
        pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname)
    os.remove(remove_base + '.eeg')
    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'),
                op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif'))

    # All our files
    use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames]
    for fname in use_fnames:
        raw_c = read_raw_fif(fname + '_raw.fif', preload=True)
        with pytest.warns(None):  # sometimes matches "MISC channel"
            raw = read_raw_ctf(fname)

        # check info match
        assert_array_equal(raw.ch_names, raw_c.ch_names)
        assert_allclose(raw.times, raw_c.times)
        assert_allclose(raw._cals, raw_c._cals)
        assert_equal(raw.info['meas_id']['version'],
                     raw_c.info['meas_id']['version'])
        for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
                            rtol=1e-4, atol=1e-7)
        for key in ('acq_pars', 'acq_stim', 'bads',
                    'ch_names', 'custom_ref_applied', 'description',
                    'events', 'experimenter', 'highpass', 'line_freq',
                    'lowpass', 'nchan', 'proj_id', 'proj_name',
                    'projs', 'sfreq', 'subject_info'):
            assert_equal(raw.info[key], raw_c.info[key], key)
        if op.basename(fname) not in single_trials:
            # We don't force buffer size to be smaller like MNE-C
            assert raw.buffer_size_sec == raw_c.buffer_size_sec
        assert_equal(len(raw.info['comps']), len(raw_c.info['comps']))
        for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']):
            for key in ('colcals', 'rowcals'):
                assert_allclose(c1[key], c2[key])
            assert_equal(c1['save_calibrated'], c2['save_calibrated'])
            for key in ('row_names', 'col_names', 'nrow', 'ncol'):
                assert_array_equal(c1['data'][key], c2['data'][key])
            assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7,
                            rtol=1e-5)
        assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'],
                        raw_c.info['hpi_results'][0]['coord_trans']['trans'],
                        rtol=1e-5, atol=1e-7)
        assert_equal(len(raw.info['chs']), len(raw_c.info['chs']))
        for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])):
            for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul',
                        'range', 'coord_frame', 'coil_type', 'logno'):
                if c1['ch_name'] == 'RMSP' and \
                        'catch-alp-good-f' in fname and \
                        key in ('kind', 'unit', 'coord_frame', 'coil_type',
                                'logno'):
                    continue  # XXX see below...
                assert_equal(c1[key], c2[key], err_msg=key)
            for key in ('cal',):
                assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))
            # XXX 2016/02/24: a bug in the normal computation was fixed;
            # once the mne-C tools are updated we should update our FIF
            # conversion files, after which the slices can go away (and this
            # check can be combined with the one for "cal")
            for key in ('loc',):
                if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname:
                    continue
                if (c2[key][:3] == 0.).all():
                    check = [np.nan] * 3
                else:
                    check = c2[key][:3]
                assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))
                if (c2[key][3:] == 0.).all():
                    check = [np.nan] * 3
                else:
                    check = c2[key][9:12]
                assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4,
                                err_msg='raw.info["chs"][%d][%s]' % (ii, key))

        # Make sure all digitization points are in the MNE head coord frame
        for p in raw.info['dig']:
            assert_equal(p['coord_frame'], FIFF.FIFFV_COORD_HEAD,
                         err_msg='dig points must be in FIFF.FIFFV_COORD_HEAD')

        if fname.endswith('catch-alp-good-f.ds'):  # omit points from .pos file
            raw.info['dig'] = raw.info['dig'][:-10]

        # XXX: Next test would fail because c-tools assign the fiducials from
        # CTF data as HPI. Should eventually clarify/unify with Matti.
        # assert_dig_allclose(raw.info, raw_c.info)

        # check data match
        raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.)
        raw_read = read_raw_fif(out_fname)

        # so let's check tricky cases based on sample boundaries
        rng = np.random.RandomState(0)
        pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10]
        bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec))
        assert_equal(bnd, raw._raw_extras[0]['block_size'])
        assert_equal(bnd, block_sizes[op.basename(fname)])
        slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None))
        if len(raw.times) >= 2 * bnd:  # at least two complete blocks
            slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                               slice(0, bnd + 100))
        for sl_time in slices:
            assert_allclose(raw[pick_ch, sl_time][0],
                            raw_c[pick_ch, sl_time][0])
            assert_allclose(raw_read[pick_ch, sl_time][0],
                            raw_c[pick_ch, sl_time][0])
        # all data / preload
        with pytest.warns(None):  # sometimes MISC
            raw = read_raw_ctf(fname, preload=True)
        assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15)
        # test bad segment annotations
        if 'testdata_ctf_short.ds' in fname:
            assert 'bad' in raw.annotations.description[0]
            assert_allclose(raw.annotations.onset, [2.15])
            assert_allclose(raw.annotations.duration, [0.0225])

    pytest.raises(TypeError, read_raw_ctf, 1)
    pytest.raises(ValueError, read_raw_ctf, ctf_fname_continuous + 'foo.ds')
    # test ignoring of system clock
    read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore')
    pytest.raises(ValueError, read_raw_ctf,
                  op.join(ctf_dir, ctf_fname_continuous), 'foo')
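
For quick reference, the reader-facing calls exercised throughout this test reduce to a few
lines; a minimal sketch, assuming a CTF .ds directory on disk (the path is a placeholder):

from mne.io import read_raw_ctf

# Read a CTF dataset into memory; system_clock='ignore' keeps all samples
# instead of truncating where the system clock channel drops to zero
raw = read_raw_ctf('/path/to/recording.ds', system_clock='ignore', preload=True)
print(raw.info)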