def test_volume_source_space():
    """Test setting up volume source spaces
    """
    fname_vol = op.join(data_path, 'subjects', 'sample', 'bem',
                        'volume-7mm-src.fif')
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    try:
        # The one in the sample dataset (uses bem as bounds)
        src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                            bem=fname_bem, mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode='approx')
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')

        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(['mne_volume_source_space',
                        '--grid',  '15.0',
                        '--src', temp_name,
                        '--mri', fname_mri])
        src = read_source_spaces(temp_name)
        src_new = setup_volume_source_space('sample', temp_name, pos=15.0,
                                            mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode='approx')
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_volume_source_space():
    """Test setting up volume source spaces
    """
    fname_vol = op.join(data_path, "subjects", "sample", "bem", "volume-7mm-src.fif")
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, "temp-src.fif")
    try:
        # The one in the sample dataset (uses bem as bounds)
        src_new = setup_volume_source_space(
            "sample", temp_name, pos=7.0, bem=fname_bem, mri=fname_mri, subjects_dir=subjects_dir
        )
        _compare_source_spaces(src, src_new, mode="approx")
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode="approx")

        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(["mne_volume_source_space", "--grid", "15.0", "--src", temp_name, "--mri", fname_mri])
        src = read_source_spaces(temp_name)
        src_new = setup_volume_source_space("sample", temp_name, pos=15.0, mri=fname_mri, subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode="approx")

        # now without MRI argument, it should give an error when we try
        # to read it
        run_subprocess(["mne_volume_source_space", "--grid", "15.0", "--src", temp_name])
        assert_raises(ValueError, read_source_spaces, temp_name)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # The one in the testing dataset (uses bem as bounds)
    for bem, surf in zip((fname_bem, None), (None, surf)):
        src_new = setup_volume_source_space(
            'sample', pos=7.0, bem=bem, surface=surf, mri='T1.mgz',
            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    pytest.raises(IOError, setup_volume_source_space, 'sample',
                  pos=7.0, bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
                               relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10)
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    pytest.raises(ValueError, setup_volume_source_space, sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    pytest.raises(ValueError, setup_volume_source_space, sphere=sphere)
def test_forward_mixed_source_space():
    """Test making the forward solution for a mixed source space
    """
    # get bem file
    fname_bem = op.join(subjects_dir, 'sample', 'bem',
                        'sample-5120-5120-5120-bem-sol.fif')
    # get the aseg file
    fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')

    # get the surface source space
    surf = setup_source_space('sample', fname=None, spacing='ico2')

    # setup two volume source spaces
    label_names = get_volume_labels_from_aseg(fname_aseg)
    vol_labels = [label_names[int(np.random.rand() * len(label_names))]
                  for _ in range(2)]
    vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
                                     mri=fname_aseg,
                                     volume_label=vol_labels[0])
    vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
                                     mri=fname_aseg,
                                     volume_label=vol_labels[1])

    # merge surfaces and volume
    src = surf + vol1 + vol2

    # calculate forward solution
    fwd = make_forward_solution(fname_raw, mri=fname_mri, src=src,
                                bem=fname_bem, fname=None)

    # extract source spaces
    src_from_fwd = fwd['src']

    # get the coordinate frame of each source space
    coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])

    # assert that all source spaces are in head coordinates
    assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())

    # run tests for SourceSpaces.export_volume
    fname_img = op.join(temp_dir, 'temp-image.mgz')

    # head coordinates and mri_resolution, but trans file
    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
                  mri_resolution=True, trans=None)

    # head coordinates and mri_resolution, but wrong trans file
    vox_mri_t = vol1[0]['vox_mri_t']
    assert_raises(RuntimeError, src_from_fwd.export_volume, fname_img,
                  mri_resolution=True, trans=vox_mri_t)
def test_discrete_source_space():
    """Test setting up (and reading/writing) discrete source spaces
    """
    src = read_source_spaces(fname)
    v = src[0]["vertno"]

    # let's make a discrete version with the C code, and with ours
    temp_name = op.join(tempdir, "temp-src.fif")
    try:
        # save
        temp_pos = op.join(tempdir, "temp-pos.txt")
        np.savetxt(temp_pos, np.c_[src[0]["rr"][v], src[0]["nn"][v]])
        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(["mne_volume_source_space", "--meters", "--pos", temp_pos, "--src", temp_name])
        src_c = read_source_spaces(temp_name)
        pos_dict = dict(rr=src[0]["rr"][v], nn=src[0]["nn"][v])
        src_new = setup_volume_source_space("sample", None, pos=pos_dict, subjects_dir=subjects_dir)
        _compare_source_spaces(src_c, src_new, mode="approx")
        assert_allclose(src[0]["rr"][v], src_new[0]["rr"], rtol=1e-3, atol=1e-6)
        assert_allclose(src[0]["nn"][v], src_new[0]["nn"], rtol=1e-3, atol=1e-6)

        # now do writing
        write_source_spaces(temp_name, src_c)
        src_c2 = read_source_spaces(temp_name)
        _compare_source_spaces(src_c, src_c2)

        # now do MRI
        assert_raises(ValueError, setup_volume_source_space, "sample", pos=pos_dict, mri=fname_mri)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def _mne_source_space(subject, src_tag, subjects_dir):
    """Load mne source space

    Parameters
    ----------
    subject : str
        Subejct
    src_tag : str
        Spacing (e.g., 'ico-4').
    """
    src_file = os.path.join(subjects_dir, subject, "bem", "%s-%s-src.fif" % (subject, src_tag))
    src, spacing = src_tag.split("-")
    if os.path.exists(src_file):
        return mne.read_source_spaces(src_file, False)
    elif src == "ico":
        return mne.setup_source_space(subject, src_file, src + spacing, subjects_dir=subjects_dir, add_dist=True)
    elif src == "vol":
        mri_file = os.path.join(subjects_dir, subject, "mri", "orig.mgz")
        bem_file = os.path.join(subjects_dir, subject, "bem", "sample-5120-5120-5120-bem-sol.fif")
        return mne.setup_volume_source_space(
            subject,
            src_file,
            float(spacing),
            mri=mri_file,
            bem=bem_file,
            mindist=0.0,
            exclude=0.0,
            subjects_dir=subjects_dir,
        )
    else:
        raise ValueError("src_tag=%s" % repr(src_tag))
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI"""
    with warnings.catch_warnings(record=True):  # MaxShield
        raw = Raw(raw_chpi_fname, allow_maxshield=True)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True)
    # XXX we need to test that the cHPI signals are actually in the correct
    # place, but that should be a subsequent enhancement (not trivial to do so)
    psd_sim, freqs_sim = compute_raw_psd(raw_sim)
    psd_chpi, freqs_chpi = compute_raw_psd(raw_chpi)
    assert_array_equal(freqs_sim, freqs_chpi)
    hpi_freqs = np.array([x['custom_ref'][0]
                          for x in raw.info['hpi_meas'][0]['hpi_coils']])
    freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    assert_allclose(psd_sim[picks_eeg], psd_chpi[picks_eeg])
    assert_true((psd_chpi[picks_meg][:, freq_idx] >
                 100 * psd_sim[picks_meg][:, freq_idx]).all())
def test_read_volume_from_src():
    """Test reading volumes from a mixed source space."""
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    labels_vol = ['Left-Amygdala',
                  'Brain-Stem',
                  'Right-Amygdala']

    src = read_source_spaces(fname)

    # Setup a volume source space
    vol_src = setup_volume_source_space('sample', mri=aseg_fname,
                                        pos=5.0,
                                        bem=fname_bem,
                                        volume_label=labels_vol,
                                        subjects_dir=subjects_dir)
    # Generate the mixed source space
    src += vol_src

    volume_src = get_volume_labels_from_src(src, 'sample', subjects_dir)
    volume_label = volume_src[0].name
    volume_label = 'Left-' + volume_label.replace('-lh', '')

    # Test
    assert_equal(volume_label, src[2]['seg_name'])

    assert_equal(src[2]['type'], 'vol')
def test_source_space_from_label():
    """Test generating a source space from volume label."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_label = label_names[int(np.random.rand() * len(label_names))]

    # Test pos as dict
    pos = dict()
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
                  volume_label=volume_label, mri=aseg_fname)

    # Test no mri provided
    pytest.raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
                  volume_label=volume_label)

    # Test invalid volume label
    pytest.raises(ValueError, setup_volume_source_space, 'sample',
                  volume_label='Hello World!', mri=aseg_fname)

    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_label, mri=aseg_fname,
                                    add_interpolator=False)
    assert_equal(volume_label, src[0]['seg_name'])

    # test reading and writing
    out_name = op.join(tempdir, 'temp-src.fif')
    write_source_spaces(out_name, src)
    src_from_file = read_source_spaces(out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
def test_other_volume_source_spaces():
    """Test setting up other volume source spaces"""
    # these are split off because they require the MNE tools, and
    # Travis doesn't seem to like them

    # let's try the spherical one (no bem or surf supplied)
    tempdir = _TempDir()
    temp_name = op.join(tempdir, 'temp-src.fif')
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name,
                    '--mri', fname_mri])
    src = read_source_spaces(temp_name)
    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                        mri=fname_mri,
                                        subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode='approx')
    del src
    del src_new
    assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
                  pos=7.0, sphere=[1., 1.], mri=fname_mri,  # bad sphere
                  subjects_dir=subjects_dir)

    # now without MRI argument, it should give an error when we try
    # to read it
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name])
    assert_raises(ValueError, read_source_spaces, temp_name)
def test_gamma_map_vol_sphere():
    """Gamma MAP with a sphere forward and volumic source space"""
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)

    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0,
                                        exclude=2.0)
    fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,
                                    eeg=False, meg=True)

    alpha = 0.5
    assert_raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0, return_residual=False)

    assert_raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False)

    stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2,
                    return_residual=False)

    assert_array_almost_equal(stc.times, evoked.times, 5)
def test_volume_labels_morph(tmpdir):
    """Test generating a source space from volume label."""
    # see gh-5224
    evoked = mne.read_evokeds(fname_evoked)[0].crop(0, 0)
    evoked.pick_channels(evoked.ch_names[:306:8])
    evoked.info.normalize_proj()
    n_ch = len(evoked.ch_names)
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    src = setup_volume_source_space(
        'sample', subjects_dir=subjects_dir, volume_label=label_names[:2],
        mri=aseg_fname)
    assert len(src) == 2
    assert src.kind == 'volume'
    n_src = sum(s['nuse'] for s in src)
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    fwd = make_forward_solution(evoked.info, fname_trans, src, sphere)
    assert fwd['sol']['data'].shape == (n_ch, n_src * 3)
    inv = make_inverse_operator(evoked.info, fwd, make_ad_hoc_cov(evoked.info),
                                loose=1.)
    stc = apply_inverse(evoked, inv)
    assert stc.data.shape == (n_src, 1)
    img = stc.as_volume(src, mri_resolution=True)
    n_on = np.array(img.dataobj).astype(bool).sum()
    assert n_on == 291  # was 291 on `master` before gh-5590
    img = stc.as_volume(src, mri_resolution=False)
    n_on = np.array(img.dataobj).astype(bool).sum()
    assert n_on == 44  # was 20 on `master` before gh-5590
def _mne_source_space(subject, src_tag, subjects_dir):
    """Load mne source space

    Parameters
    ----------
    subject : str
        Subejct
    src_tag : str
        Spacing (e.g., 'ico-4').
    """
    src_file = os.path.join(subjects_dir, subject, 'bem',
                            '%s-%s-src.fif' % (subject, src_tag))
    src, spacing = src_tag.split('-')
    if os.path.exists(src_file):
        return mne.read_source_spaces(src_file, False)
    elif src == 'ico':
        ss = mne.setup_source_space(subject, spacing=src + spacing,
                                    subjects_dir=subjects_dir, add_dist=True)
    elif src == 'vol':
        mri_file = os.path.join(subjects_dir, subject, 'mri', 'orig.mgz')
        bem_file = os.path.join(subjects_dir, subject, 'bem',
                                'sample-5120-5120-5120-bem-sol.fif')
        ss = mne.setup_volume_source_space(subject, pos=float(spacing),
                                           mri=mri_file, bem=bem_file,
                                           mindist=0., exclude=0.,
                                           subjects_dir=subjects_dir)
    else:
        raise ValueError("src_tag=%s" % repr(src_tag))
    mne.write_source_spaces(src_file, ss)
    return ss
def create_mixed_source_space(sbj_dir, sbj_id, spacing, labels, src):
    import os.path as op
    from mne import setup_volume_source_space

    bem_dir = op.join(sbj_dir, sbj_id, 'bem')

#    src_aseg_fname = op.join(bem_dir, '%s-%s-aseg-src.fif' %(sbj_id, spacing))
    aseg_fname = op.join(sbj_dir, sbj_id, 'mri/aseg.mgz')

    if spacing == 'oct-6':
        pos = 5.0
    elif spacing == 'ico-5':
        pos = 3.0
    else:
        raise ValueError("Unsupported spacing: %r" % (spacing,))

    model_fname = op.join(bem_dir, '%s-5120-bem.fif' % sbj_id)
    for l in labels:
        print(l)
        vol_label = setup_volume_source_space(sbj_id, mri=aseg_fname,
                                              pos=pos,
                                              bem=model_fname,
                                              volume_label=l,
                                              subjects_dir=sbj_dir)
        src += vol_label

#    write_source_spaces(src_aseg_fname, src)

    # Export source positions to a NIfTI file
    nii_fname = op.join(bem_dir, '%s-%s-aseg-src.nii' % (sbj_id, spacing))

    # Combine the source spaces
    src.export_volume(nii_fname, mri_resolution=True)

    return src
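# Usage sketch (illustrative, not from the original code): combine a surface
# source space with two subcortical volumes from aseg.mgz. The subjects
# directory path and label names below are hypothetical placeholders.
def _example_create_mixed_source_space():
    import mne
    sbj_dir = '/data/subjects'  # hypothetical FreeSurfer subjects_dir
    sbj_id = 'sample'
    surf_src = mne.setup_source_space(sbj_id, spacing='oct6',
                                      subjects_dir=sbj_dir, add_dist=False)
    labels = ['Left-Amygdala', 'Right-Amygdala']
    # 'oct-6' selects the 5 mm grid spacing inside create_mixed_source_space
    return create_mixed_source_space(sbj_dir, sbj_id, 'oct-6', labels,
                                     surf_src)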
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI"""
    with warnings.catch_warnings(record=True):  # MaxShield
        raw = Raw(raw_chpi_fname, allow_maxshield=True)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname)
    # test that the cHPI signals make some reasonable values
    psd_sim, freqs_sim = compute_raw_psd(raw_sim)
    psd_chpi, freqs_chpi = compute_raw_psd(raw_chpi)
    assert_array_equal(freqs_sim, freqs_chpi)
    hpi_freqs = _get_hpi_info(raw.info)[0]
    freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    assert_allclose(psd_sim[picks_eeg], psd_chpi[picks_eeg], atol=1e-20)
    assert_true((psd_chpi[picks_meg][:, freq_idx] >
                 100 * psd_sim[picks_meg][:, freq_idx]).all())
    # test localization based on cHPI information
    trans_sim, rot_sim, t_sim = _calculate_chpi_positions(raw_chpi)
    trans, rot, t = get_chpi_positions(pos_fname)
    t -= raw.first_samp / raw.info['sfreq']
    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
                       max_dist=0.005)
def test_discrete_source_space():
    """Test setting up (and reading/writing) discrete source spaces
    """
    src = read_source_spaces(fname)
    v = src[0]['vertno']

    # let's make a discrete version with the C code, and with ours
    temp_name = op.join(tempdir, 'temp-src.fif')
    try:
        # save
        temp_pos = op.join(tempdir, 'temp-pos.txt')
        np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(['mne_volume_source_space', '--meters',
                        '--pos',  temp_pos, '--src', temp_name])
        src_c = read_source_spaces(temp_name)
        src_new = setup_volume_source_space('sample', None,
                                            pos=dict(rr=src[0]['rr'][v],
                                                     nn=src[0]['nn'][v]),
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src_c, src_new, mode='approx')
        assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
                        rtol=1e-3, atol=1e-6)
        assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
                        rtol=1e-3, atol=1e-6)

        # now do writing
        write_source_spaces(temp_name, src_c)
        src_c2 = read_source_spaces(temp_name)
        _compare_source_spaces(src_c, src_c2)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_forward_mixed_source_space():
    """Test making the forward solution for a mixed source space
    """
    temp_dir = _TempDir()
    # get the surface source space
    surf = read_source_spaces(fname_src)

    # setup two volume source spaces
    label_names = get_volume_labels_from_aseg(fname_aseg)
    vol_labels = [label_names[int(np.random.rand() * len(label_names))]
                  for _ in range(2)]
    vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
                                     mri=fname_aseg,
                                     volume_label=vol_labels[0],
                                     add_interpolator=False)
    vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
                                     mri=fname_aseg,
                                     volume_label=vol_labels[1],
                                     add_interpolator=False)

    # merge surfaces and volume
    src = surf + vol1 + vol2

    # calculate forward solution
    fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem, None)
    assert_true(repr(fwd))

    # extract source spaces
    src_from_fwd = fwd['src']

    # get the coordinate frame of each source space
    coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])

    # assert that all source spaces are in head coordinates
    assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())

    # run tests for SourceSpaces.export_volume
    fname_img = op.join(temp_dir, 'temp-image.mgz')

    # head coordinates and mri_resolution, but trans file
    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
                  mri_resolution=True, trans=None)

    # head coordinates and mri_resolution, but wrong trans file
    vox_mri_t = vol1[0]['vox_mri_t']
    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
                  mri_resolution=True, trans=vox_mri_t)
def test_make_forward_solution_discrete():
    """Test making and converting a forward solution with discrete src."""
    # smoke test for depth weighting and discrete source spaces
    src = read_source_spaces(fname_src)[0]
    src = SourceSpaces([src] + setup_volume_source_space(
        pos=dict(rr=src['rr'][src['vertno'][:3]].copy(),
                 nn=src['nn'][src['vertno'][:3]].copy())))
    sphere = make_sphere_model()
    fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                meg=True, eeg=False)
    convert_forward_solution(fwd, surf_ori=True)
def test_inverse_ctf_comp():
    """Test interpolation with compensated CTF data."""
    ctf_dir = op.join(testing.data_path(download=False), 'CTF')
    raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds')
    raw = mne.io.read_raw_ctf(raw_fname)
    raw.apply_gradient_compensation(1)
    sphere = make_sphere_model()
    cov = make_ad_hoc_cov(raw.info)
    src = mne.setup_volume_source_space(
        pos=dict(rr=[[0., 0., 0.01]], nn=[[0., 1., 0.]]))
    fwd = make_forward_solution(raw.info, None, src, sphere, eeg=False)
    inv = make_inverse_operator(raw.info, fwd, cov, loose=1.)
    apply_inverse_raw(raw, inv, 1. / 9.)
def test_volume_source_space():
    """Test setting up volume source spaces
    """
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    # The one in the sample dataset (uses bem as bounds)
    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                        bem=fname_bem, mri=fname_mri,
                                        subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode='approx')
    del src_new
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode='approx')
def test_gamma_map_vol_sphere():
    """Gamma MAP with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info, rank=None)

    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=30., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0,
                                        exclude=2.0)
    fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,
                                    eeg=False, meg=True)

    alpha = 0.5
    pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0, return_residual=False)

    pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False)

    stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2,
                    return_residual=False)

    assert_array_almost_equal(stc.times, evoked.times, 5)

    # Compare orientation obtained using fit_dipole and gamma_map
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=stc.vertices[:1],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
                                                use_cps=True)

    dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True)

    amp_max = [np.max(d.amplitude) for d in dip_gmap]
    dip_gmap = dip_gmap[np.argmax(amp_max)]
    assert (dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices])

    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    assert (np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99)
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
    picks = np.arange(len(raw.ch_names))
    picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space(sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    with pytest.deprecated_call():
        raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None,
                               head_pos=pos_fname, interp='zero')
    # need to trim extra samples off this one
    with pytest.deprecated_call():
        raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None,
                                chpi=True, head_pos=pos_fname, interp='zero')
    # test cHPI indication
    hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(raw.info)
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    for picks in [picks_meg[:3], picks_eeg[:3]]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)

        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f))
                            for f in hpi_freqs])
        if picks is picks_meg:
            assert (psd_chpi[:, freq_idx] >
                    100 * psd_sim[:, freq_idx]).all()
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)

    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi, t_step_min=10.)
    quats = read_head_pos(pos_fname)
    _assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5)
def test_rap_music_sphere():
    """Test RAP-MUSIC with real data, sphere model, MEG only."""
    evoked, noise_cov = _get_data(ch_decim=8)
    sphere = mne.make_sphere_model(r0=(0., 0., 0.04))
    src = mne.setup_volume_source_space(subject=None, pos=10.,
                                        sphere=(0.0, 0.0, 40, 65.0),
                                        mindist=5.0, exclude=0.0)
    forward = mne.make_forward_solution(evoked.info, trans=None, src=src,
                                        bem=sphere)

    dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2)
    # Test that there is one dipole on each hemisphere
    pos = np.array([dip.pos[0] for dip in dipoles])
    assert_equal(pos.shape, (2, 3))
    assert_equal((pos[:, 0] < 0).sum(), 1)
    assert_equal((pos[:, 0] > 0).sum(), 1)
    # Check the amplitude scale
    assert_true(1e-10 < dipoles[0].amplitude[0] < 1e-7)
def test_lcmv_ctf_comp():
    """Test interpolation with compensated CTF data."""
    ctf_dir = op.join(testing.data_path(download=False), 'CTF')
    raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds')
    raw = mne.io.read_raw_ctf(raw_fname, preload=True)

    events = mne.make_fixed_length_events(raw, duration=0.2)[:2]
    epochs = mne.Epochs(raw, events, tmin=0., tmax=0.2)
    evoked = epochs.average()

    with pytest.warns(RuntimeWarning,
                      match='Too few samples .* estimate may be unreliable'):
        data_cov = mne.compute_covariance(epochs)
    fwd = mne.make_forward_solution(evoked.info, None,
                                    mne.setup_volume_source_space(pos=15.0),
                                    mne.make_sphere_model())
    filters = mne.beamformer.make_lcmv(evoked.info, fwd, data_cov)
    assert 'weights' in filters
def test_inverse_ctf_comp():
    """Test interpolation with compensated CTF data."""
    raw = mne.io.read_raw_ctf(fname_raw_ctf).crop(0, 0)
    raw.apply_gradient_compensation(1)
    sphere = make_sphere_model()
    cov = make_ad_hoc_cov(raw.info)
    src = mne.setup_volume_source_space(
        pos=dict(rr=[[0., 0., 0.01]], nn=[[0., 1., 0.]]))
    fwd = make_forward_solution(raw.info, None, src, sphere, eeg=False)
    raw.apply_gradient_compensation(0)
    with pytest.raises(RuntimeError, match='compensation grade mismatch'):
        make_inverse_operator(raw.info, fwd, cov, loose=1.)
    raw.apply_gradient_compensation(1)
    inv = make_inverse_operator(raw.info, fwd, cov, loose=1.)
    apply_inverse_raw(raw, inv, 1. / 9.)  # smoke test
    raw.apply_gradient_compensation(0)
    with pytest.raises(RuntimeError, match='compensation grade mismatch'):
        apply_inverse_raw(raw, inv, 1. / 9.)
def test_mxne_vol_sphere():
    """(TF-)MxNE with a sphere forward and volumic source space"""
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    cov = read_cov(fname_cov)

    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)

    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0,
                                        exclude=2.0)
    fwd = mne.make_forward_solution(info, trans=None, src=src,
                                    bem=sphere, eeg=False, meg=True)

    alpha = 80.
    assert_raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.0, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)

    assert_raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)

    # irMxNE tests
    stc = mixed_norm(evoked_l21, fwd, cov, alpha,
                     n_mxne_iter=1, maxit=30, tol=1e-8,
                     active_set_size=10)
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)

    # Do with TF-MxNE for test memory savings
    alpha_space = 60.  # spatial regularization parameter
    alpha_time = 1.  # temporal regularization parameter

    stc, _ = tf_mixed_norm(evoked, fwd, cov, alpha_space, alpha_time,
                           maxit=3, tol=1e-4,
                           tstep=16, wsize=32, window=0.1,
                           return_residual=True)
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_array_almost_equal(stc.times, evoked.times, 5)
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes',
                       add_eeg_ref=False)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname)
    # test cHPI indication
    hpi_freqs, _, hpi_pick, hpi_ons = _get_hpi_info(raw.info)[:4]
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    for picks in [picks_meg, picks_eeg]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)

        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f))
                           for f in hpi_freqs])
        if picks is picks_meg:
            assert_true((psd_chpi[:, freq_idx] >
                         100 * psd_sim[:, freq_idx]).all())
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)

    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi)
    trans_sim, rot_sim, t_sim = head_pos_to_trans_rot_t(quats_sim)
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    t -= raw.first_samp / raw.info['sfreq']
    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
                       max_dist=0.005)
def _mne_source_space(subject, src_tag, subjects_dir):
    """Load mne source space"""
    src_file = os.path.join(subjects_dir, subject, 'bem',
                            '%s-%s-src.fif' % (subject, src_tag))
    src = src_tag[:3]
    if os.path.exists(src_file):
        return mne.read_source_spaces(src_file, False)
    elif src == 'ico':
        return mne.setup_source_space(subject, src_file, 'ico4',
                                      subjects_dir=subjects_dir, add_dist=True)
    elif src == 'vol':
        mri_file = os.path.join(subjects_dir, subject, 'mri', 'orig.mgz')
        bem_file = os.path.join(subjects_dir, subject, 'bem',
                                'sample-5120-5120-5120-bem-sol.fif')
        return mne.setup_volume_source_space(subject, src_file, pos=10.,
                                             mri=mri_file, bem=bem_file,
                                             mindist=0., exclude=0.,
                                             subjects_dir=subjects_dir)
    else:
        raise ValueError("src_tag=%s" % repr(src_tag))
def test_other_volume_source_spaces():
    """Test setting up other volume source spaces."""
    # these are split off because they require the MNE tools, and
    # Travis doesn't seem to like them

    # let's try the spherical one (no bem or surf supplied)
    tempdir = _TempDir()
    temp_name = op.join(tempdir, 'temp-src.fif')
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name,
                    '--mri', fname_mri])
    src = read_source_spaces(temp_name)
    src_new = setup_volume_source_space(None, pos=7.0, mri=fname_mri,
                                        subjects_dir=subjects_dir)
    # we use a more accurate elimination criterion, so let's fix the MNE-C
    # source space
    assert len(src_new[0]['vertno']) == 7497
    assert len(src) == 1
    assert len(src_new) == 1
    good_mask = np.in1d(src[0]['vertno'], src_new[0]['vertno'])
    src[0]['inuse'][src[0]['vertno'][~good_mask]] = 0
    assert src[0]['inuse'].sum() == 7497
    src[0]['vertno'] = src[0]['vertno'][good_mask]
    assert len(src[0]['vertno']) == 7497
    src[0]['nuse'] = len(src[0]['vertno'])
    assert src[0]['nuse'] == 7497
    _compare_source_spaces(src_new, src, mode='approx')
    assert 'volume, shape' in repr(src)
    del src
    del src_new
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=7.0,
                  sphere=[1., 1.], mri=fname_mri,  # bad sphere
                  subjects_dir=subjects_dir)

    # now without MRI argument, it should give an error when we try
    # to read it
    run_subprocess(['mne_volume_source_space',
                    '--grid', '7.0',
                    '--src', temp_name])
    pytest.raises(ValueError, read_source_spaces, temp_name)
def test_volume_source_space():
    """Test setting up volume source spaces
    """
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # The one in the testing dataset (uses bem as bounds)
    for bem, surf in zip((fname_bem, None), (None, surf)):
        src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                            bem=bem, surface=surf,
                                            mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    assert_raises(IOError, setup_volume_source_space, 'sample', temp_name,
                  pos=7.0, bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
def test_plot_alignment_basic(tmp_path, renderer, mixed_fwd_cov_evoked):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmp_path)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{
        'coord_frame': 5,
        'ident': 1,
        'kind': 1,
        'r': [-0.08061612, -0.02908875, -0.04131077]
    }, {
        'coord_frame': 5,
        'ident': 2,
        'kind': 1,
        'r': [0.00146763, 0.08506715, -0.03483611]
    }, {
        'coord_frame': 5,
        'ident': 3,
        'kind': 1,
        'r': [0.08436285, -0.02850276, -0.04127743]
    }]
    write_dig(fiducials_path, fid, 5)
    evoked = read_evokeds(evoked_fname)[0]
    info = evoked.info

    sample_src = read_source_spaces(src_fname)
    pytest.raises(TypeError,
                  plot_alignment,
                  'foo',
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir)
    pytest.raises(OSError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  src='foo')
    pytest.raises(ValueError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='fsaverage',
                  subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir,
                    head=True,
                    skull=True,
                    brain='white')
    # mixed source space
    mixed_src = mixed_fwd_cov_evoked[0]['src']
    assert mixed_src.kind == 'mixed'
    fig = plot_alignment(info,
                         meg=['helmet', 'sensors'],
                         dig=True,
                         coord_frame='head',
                         trans=Path(trans_fname),
                         subject='sample',
                         mri_fiducials=fiducials_path,
                         subjects_dir=subjects_dir,
                         src=mixed_src)
    assert isinstance(fig, Figure3D)
    renderer.backend._close_all()
    # no-head version
    renderer.backend._close_all()
    # trans required
    with pytest.raises(ValueError, match='transformation matrix.*in head'):
        plot_alignment(info, trans=None, src=src_fname)
    with pytest.raises(ValueError, match='transformation matrix.*in head'):
        plot_alignment(info, trans=None, mri_fiducials=True)
    with pytest.raises(ValueError, match='transformation matrix.*in head'):
        plot_alignment(info, trans=None, surfaces=['brain'])
    assert mixed_src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
    with pytest.raises(ValueError, match='head-coordinate source space in mr'):
        plot_alignment(trans=None, src=mixed_src, coord_frame='mri')
    # all coord frames
    plot_alignment(info)  # works: surfaces='auto' default
    for coord_frame in ('meg', 'head', 'mri'):
        plot_alignment(info,
                       meg=['helmet', 'sensors'],
                       dig=True,
                       coord_frame=coord_frame,
                       trans=Path(trans_fname),
                       subject='sample',
                       src=src_fname,
                       mri_fiducials=fiducials_path,
                       subjects_dir=subjects_dir)
    renderer.backend._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    with evoked_eeg_ecog_seeg.info._unlock():
        evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({
        'EEG 001': 'ecog',
        'EEG 002': 'seeg'
    })
    with catch_logging() as log:
        plot_alignment(evoked_eeg_ecog_seeg.info,
                       subject='sample',
                       trans=trans_fname,
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'],
                       ecog=True,
                       seeg=True,
                       verbose=True)
    log = log.getvalue()
    assert 'ecog: 1' in log
    assert 'seeg: 1' in log
    renderer.backend._close_all()

    sphere = make_sphere_model(info=info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info,
        trans_fname,
        subject='sample',
        eeg='projected',
        meg='helmet',
        bem=sphere,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg='helmet',
                   subjects_dir=subjects_dir,
                   eeg='projected',
                   bem=sphere,
                   surfaces=['head', 'brain'],
                   src=sample_src)
    # no trans okay, no mri surfaces
    plot_alignment(info, bem=sphere, surfaces=['brain'])
    with pytest.raises(ValueError, match='A head surface is required'):
        plot_alignment(info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       eeg='projected',
                       surfaces=[])
    with pytest.raises(RuntimeError, match='No brain surface found'):
        plot_alignment(info,
                       trans=trans_fname,
                       subject='foo',
                       subjects_dir=subjects_dir,
                       surfaces=['brain'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=[],
                   subjects_dir=subjects_dir,
                   bem=bem_sol,
                   eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'],
                   bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info,
                           trans_fname,
                           subject='sample',
                           meg=True,
                           subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'],
                           bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert 'not find the surface for head in the provided BEM model' in log
    # sphere model
    sphere = make_sphere_model('auto', 'auto', info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info,
        trans=Transform('head', 'mri'),
        eeg='projected',
        meg='helmet',
        bem=sphere,
        src=src,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, info)  # one layer
    # if you ask for a brain surface with a 1-layer sphere model it's an error
    with pytest.raises(RuntimeError, match='Sphere model does not have'):
        plot_alignment(trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['brain'],
                       bem=sphere)
    # but you can ask for a specific brain surface, and
    # no info is permitted
    plot_alignment(trans=trans_fname,
                   subject='sample',
                   meg=False,
                   coord_frame='mri',
                   subjects_dir=subjects_dir,
                   surfaces=['white'],
                   bem=sphere,
                   show_axes=True)
    renderer.backend._close_all()
    # TODO: We need to make this class public and document it properly
    # assert isinstance(fig, some_public_class)
    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, np.arange(6))
    with info._unlock():
        info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    info_cube['chs'][1]['coil_type'] = 9998
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    # make sure our other OPMs can be plotted, too
    for ii, kind in enumerate(
        ('QUSPIN_ZFOPM_MAG', 'QUSPIN_ZFOPM_MAG2', 'FIELDLINE_OPM_MAG_GEN1',
         'KERNEL_OPM_MAG_GEN1'), 2):
        info_cube['chs'][ii]['coil_type'] = getattr(FIFF, f'FIFFV_COIL_{kind}')
    with use_coil_def(coil_def_fname):
        with catch_logging() as log:
            plot_alignment(info_cube,
                           meg='sensors',
                           surfaces=(),
                           dig=True,
                           verbose='debug')
    log = log.getvalue()
    assert 'planar geometry' in log

    # one layer bem with skull surfaces:
    with pytest.raises(RuntimeError, match='Sphere model does not.*boundary'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'],
                       bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='Invalid value for the .eeg'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='Invalid value for the .meg'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError, match='Only one brain surface can be plot'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='surfaces.*must be'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['foo'])
    with pytest.raises(TypeError, match="must be an instance of "):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=dict(brain='super clear'))
    with pytest.raises(ValueError, match="must be between 0 and 1"):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=dict(brain=42))
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')
    fwd['coord_frame'] = FIFF.FIFFV_COORD_MRI  # check required to get to MRI
    with pytest.raises(ValueError, match='transformation matrix.*in head coo'):
        plot_alignment(info, trans=None, fwd=fwd)
    # surfaces as dict
    plot_alignment(subject='sample',
                   coord_frame='head',
                   trans=trans_fname,
                   subjects_dir=subjects_dir,
                   surfaces={
                       'white': 0.4,
                       'outer_skull': 0.6,
                       'head': None
                   })
def make_discrete_forward_solutions(info,
                                    rr,
                                    vbem,
                                    trans_true,
                                    trans_man,
                                    subjects_dir,
                                    source_ori='random',
                                    fn_fwd_disc_true=None,
                                    fn_fwd_disc_man=None):
    """
    Create a discrete source space based on the rr coordinates and
    make one forward solution for the true trans file and one for
    the manually created trans file.

    Parameters
    ----------
    info : instance of mne.Info | str
        If str, then it should be a filename to a Raw, Epochs, or Evoked
        file with measurement information. If dict, should be an info
        dict (such as one from Raw, Epochs, or Evoked).
    rr : np.array of shape (n_vertices, 3)
        The coordinates of the volume source space.
    vbem : dict | str
        Filename of the volume BEM (e.g., "sample-5120-bem-sol.fif") to
        use, or a loaded sphere model (dict).
    trans_true : str
        The true head<->MRI transform.
    trans_man : str
        The manually created head<->MRI transform.
    subjects_dir : str
        Path to the subject directory.
    source_ori : 'random' or 'orthogonal'
        The normal vectors ('nn' in the pos dict) have random orientations
        or they are orthogonal to the surface normal vector.
    fn_fwd_disc_true : None | str
        Path where the forward solution corresponding to the true
        transformation is to be saved. It should end with -fwd.fif
        or -fwd.fif.gz. If None, the fwd solution will not be written
        to disk.
    fn_fwd_disc_man : None | str
        Path where the forward solution corresponding to the manually
        created transformation is to be saved. It should end with
        -fwd.fif or -fwd.fif.gz. If None, the fwd solution will not be
        written to disk.

    Returns
    -------
    fwd_disc_true : instance of mne.Forward
        The discrete forward solution created with the true trans file.
    fwd_disc_man : instance of mne.Forward
        The discrete forward solution created with the manual trans file.
    """

    ###########################################################################
    # Construct source space normals as random vectors
    ###########################################################################
    rnd_vectors = np.array([random_three_vector() for i in range(rr.shape[0])])

    if source_ori == 'random':
        pos = {'rr': rr, 'nn': rnd_vectors}

    elif source_ori == 'orthogonal':
        com = rr.mean(axis=0)  # center of mass
        # get vectors pointing from center of mass to voxels
        radial = rr - com
        tangential = np.cross(radial, rnd_vectors)
        # normalize to unit length
        nn = (tangential.T * (1. / np.linalg.norm(tangential, axis=1))).T
        pos = {'rr': rr, 'nn': nn}

    else:
        raise ValueError("source_ori must be either 'random' or 'orthogonal'.")

    ###########################################################################
    # make discrete source space
    ###########################################################################

    # setup_volume_source_space sets coordinate frame to MRI
    vsrc_disc_mri = mne.setup_volume_source_space(subject='sample',
                                                  pos=pos,
                                                  mri=None,
                                                  bem=vbem,
                                                  subjects_dir=subjects_dir)

    # create forward solution for true trans file
    fwd_disc_true = mne.make_forward_solution(info,
                                              trans=trans_true,
                                              src=vsrc_disc_mri,
                                              bem=vbem,
                                              meg=True,
                                              eeg=False)
    if fn_fwd_disc_true is not None:
        mne.write_forward_solution(fn_fwd_disc_true,
                                   fwd_disc_true,
                                   overwrite=True)

    # create forward solution for manually created trans file
    fwd_disc_man = mne.make_forward_solution(info,
                                             trans=trans_man,
                                             src=vsrc_disc_mri,
                                             bem=vbem,
                                             meg=True,
                                             eeg=False)
    if fn_fwd_disc_man is not None:
        mne.write_forward_solution(fn_fwd_disc_man,
                                   fwd_disc_man,
                                   overwrite=True)

    return fwd_disc_true, fwd_disc_man
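# A hedged usage sketch for the function above; the grid and the file names
# below are placeholders, not taken from the original script. Two forward
# solutions are produced, one per trans file, so that they can be compared.
# rr_grid = np.random.RandomState(0).uniform(-0.03, 0.03, (50, 3))  # meters
# fwd_true, fwd_man = make_discrete_forward_solutions(
#     info, rr_grid, vbem='sample-5120-bem-sol.fif',
#     trans_true='sample_true-trans.fif', trans_man='sample_man-trans.fif',
#     subjects_dir=subjects_dir, source_ori='orthogonal')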
def test_combine_source_spaces(tmpdir):
    """Test combining source spaces."""
    rng = np.random.RandomState(0)
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_labels = rng.choice(label_names, 2)

    # get a surface source space (no need to test creation here)
    srf = read_source_spaces(fname, patch_stats=False)

    # setup a volume source space
    vol = setup_volume_source_space('sample',
                                    subjects_dir=subjects_dir,
                                    volume_label=volume_labels[0],
                                    mri=aseg_fname,
                                    add_interpolator=False)

    # setup a discrete source space
    rr = rng.randint(0, 20, (100, 3)) * 1e-3
    nn = np.zeros(rr.shape)
    nn[:, -1] = 1
    pos = {'rr': rr, 'nn': nn}
    disc = setup_volume_source_space('sample',
                                     subjects_dir=subjects_dir,
                                     pos=pos,
                                     verbose='error')

    # combine source spaces
    assert srf.kind == 'surface'
    assert vol.kind == 'volume'
    assert disc.kind == 'discrete'
    src = srf + vol + disc
    assert src.kind == 'mixed'
    assert srf.kind == 'surface'
    assert vol.kind == 'volume'
    assert disc.kind == 'discrete'

    # test addition of source spaces
    assert len(src) == 4

    # test reading and writing
    src_out_name = tmpdir.join('temp-src.fif')
    src.save(src_out_name)
    src_from_file = read_source_spaces(src_out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
    assert_equal(repr(src), repr(src_from_file))
    assert_equal(src.kind, 'mixed')

    # test that all source spaces are in MRI coordinates
    coord_frames = np.array([s['coord_frame'] for s in src])
    assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()

    # test errors for export_volume
    image_fname = tmpdir.join('temp-image.mgz')

    # source spaces with no volume
    pytest.raises(ValueError, srf.export_volume, image_fname, verbose='error')

    # unrecognized source type
    disc2 = disc.copy()
    disc2[0]['type'] = 'kitty'
    with pytest.raises(ValueError, match='Invalid value'):
        src + disc2
    del disc2

    # unrecognized file type
    bad_image_fname = tmpdir.join('temp-image.png')
    # vertices outside vol space warning
    pytest.raises(ValueError,
                  src.export_volume,
                  bad_image_fname,
                  verbose='error')

    # mixed coordinate frames
    disc3 = disc.copy()
    disc3[0]['coord_frame'] = 10
    src_mixed_coord = src + disc3
    pytest.raises(ValueError,
                  src_mixed_coord.export_volume,
                  image_fname,
                  verbose='error')
Example #34
# build fs_average mixed 'oct6' with limbic source space & save (to use as morph target later)
labels_limb = [
    'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum',
    'Left-Hippocampus', 'Left-Amygdala', 'Right-Thalamus-Proper',
    'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
    'Right-Amygdala'
]
fs_src = mne.setup_source_space("fsaverage",
                                spacing='oct6',
                                surface="white",
                                subjects_dir=mri_dir,
                                n_jobs=6)
fs_limb_src = mne.setup_volume_source_space("fsaverage",
                                            mri="aseg.mgz",
                                            pos=5.0,
                                            bem=bem,
                                            volume_label=labels_limb,
                                            subjects_dir=mri_dir,
                                            add_interpolator=True,
                                            verbose=True)
fs_src += fs_limb_src
# print out the number of spaces and points
n = sum(fs_src[i]['nuse'] for i in range(len(fs_src)))
print('the fs_src space contains %d spaces and %d points' % (len(fs_src), n))
fs_src.plot(subjects_dir=mri_dir)
# save the mixed source space
fs_src.save("{}fsaverage_oct6_mix-src.fif".format(meg_dir), overwrite=True)
del fs_src
# create another volume source space, with limbic structures as single volume (for later cluster stats)
fs_limb_vol = mne.setup_volume_source_space("fsaverage", mri="aseg.mgz",
                                            pos=5.0, bem=bem,
                                            volume_label=labels_limb,
                                            single_volume=True,
                                            subjects_dir=mri_dir)
bem_sol = mne.make_bem_solution(bem)
mne.write_bem_solution(fname.bem, bem_sol)

# create surface source space & forward solution
src_surf = mne.setup_source_space(subject=subject_id,
                                  subjects_dir=fname.subjects_dir,
                                  n_jobs=n_jobs)
fwd_surf = mne.make_forward_solution(info=info,
                                     trans=trans,
                                     src=src_surf,
                                     bem=bem_sol)

# create volume source space & forward solution
src = mne.setup_volume_source_space(subject=subject_id,
                                    pos=7.,
                                    mri=fname.mri,
                                    bem=bem_sol,
                                    subjects_dir=fname.subjects_dir)
fwd = mne.make_forward_solution(info=info, trans=trans, src=src, bem=bem_sol)

# Save things
trans.save(fname.trans)
src_surf.save(fname.src_surf, overwrite=True)
mne.write_forward_solution(fname.fwd_surf, fwd_surf, overwrite=True)

src.save(fname.src, overwrite=True)
mne.write_forward_solution(fname.fwd, fwd, overwrite=True)

# Visualize surface source space and MEG sensors
fig = mne.viz.plot_alignment(info=info,
                             trans=trans,
Example #36
def test_scale_mri(tmpdir, few_surfaces, scale):
    """Test creating fsaverage and scaling it."""
    # create fsaverage using the testing "fsaverage" instead of the FreeSurfer
    # one
    tempdir = str(tmpdir)
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir,
                           fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True,
                           subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage',
                                 'ico0',
                                 subjects_dir=tempdir,
                                 add_dist=False)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space('fsaverage',
                                         pos=50,
                                         mri=mri,
                                         subjects_dir=tempdir,
                                         add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage
    write_source_spaces(path % 'ico-0', src, overwrite=True)
    with pytest.warns(None):  # sometimes missing nibabel
        scale_mri('fsaverage',
                  'flachkopf',
                  scale,
                  True,
                  subjects_dir=tempdir,
                  verbose='debug')
    assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
    spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')

    assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
    assert os.path.isfile(
        os.path.join(tempdir, 'flachkopf', 'surf', 'lh.sphere.reg'))
    vsrc_s = mne.read_source_spaces(spath % 'vol-50')
    for vox in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 2, 3]):
        idx = np.ravel_multi_index(vox, vsrc[0]['shape'], order='F')
        err_msg = f'idx={idx} @ {vox}, scale={scale}'
        assert_allclose(apply_trans(vsrc[0]['src_mri_t'], vox),
                        vsrc[0]['rr'][idx],
                        err_msg=err_msg)
        assert_allclose(apply_trans(vsrc_s[0]['src_mri_t'], vox),
                        vsrc_s[0]['rr'][idx],
                        err_msg=err_msg)
    scale_labels('flachkopf', subjects_dir=tempdir)

    # add distances to source space after hacking the properties to make
    # it run *much* faster
    src_dist = src.copy()
    for s in src_dist:
        s.update(rr=s['rr'][s['vertno']],
                 nn=s['nn'][s['vertno']],
                 tris=s['use_tris'])
        s.update(np=len(s['rr']),
                 ntri=len(s['tris']),
                 vertno=np.arange(len(s['rr'])),
                 inuse=np.ones(len(s['rr']), int))
    mne.add_source_space_distances(src_dist)
    write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
    assert ssrc[0]['nearest'] is not None

    # check patch info computation (only if SciPy is new enough to be fast)
    if check_version('scipy', '1.3'):
        for s in src_dist:
            for key in ('dist', 'dist_limit'):
                s[key] = None
        write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

        # scale with distances
        os.remove(spath % 'ico-0')
        scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
        ssrc = mne.read_source_spaces(spath % 'ico-0')
        assert ssrc[0]['dist'] is None
        assert ssrc[0]['nearest'] is not None
src = mne.setup_source_space(subject,
                             spacing='oct5',
                             add_dist=False,
                             subjects_dir=subjects_dir)

###############################################################################
# Now we create a mixed src space by adding the volume regions specified in
# the list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here we use a 10 mm spacing to save time;
# something smaller, like 5 mm, is recommended for actual analyses):

vol_src = mne.setup_volume_source_space(
    subject,
    mri=fname_aseg,
    pos=10.0,
    bem=fname_model,
    volume_label=labels_vol,
    subjects_dir=subjects_dir,
    add_interpolator=False,  # just for speed, usually this should be True
    verbose=True)

# Generate the mixed source space
src += vol_src

# Visualize the source space.
src.plot(subjects_dir=subjects_dir)

n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))

###############################################################################
Example #38
def test_plot_alignment():
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = _TempDir()
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{
        'coord_frame': 5,
        'ident': 1,
        'kind': 1,
        'r': [-0.08061612, -0.02908875, -0.04131077]
    }, {
        'coord_frame': 5,
        'ident': 2,
        'kind': 1,
        'r': [0.00146763, 0.08506715, -0.03483611]
    }, {
        'coord_frame': 5,
        'ident': 3,
        'kind': 1,
        'r': [0.08436285, -0.02850276, -0.04127743]
    }]
    write_dig(fiducials_path, fid, 5)

    mlab = _import_mlab()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname,
                       config_fname,
                       hs_fname,
                       convert=True,
                       preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        plot_alignment(info,
                       trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       meg=meg)
        mlab.close(all=True)
    # KIT ref sensor coil def is defined
    mlab.close(all=True)
    info = infos['Neuromag']
    pytest.raises(TypeError,
                  plot_alignment,
                  'foo',
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir)
    pytest.raises(TypeError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  src='foo')
    pytest.raises(ValueError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='fsaverage',
                  subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir,
                    head=True,
                    skull=True,
                    brain='white')
    mlab.close(all=True)
    # no-head version
    mlab.close(all=True)
    # all coord frames
    pytest.raises(ValueError, plot_alignment, info)
    plot_alignment(info, surfaces=[])
    for coord_frame in ('meg', 'head', 'mri'):
        plot_alignment(info,
                       meg=['helmet', 'sensors'],
                       dig=True,
                       coord_frame=coord_frame,
                       trans=trans_fname,
                       subject='sample',
                       mri_fiducials=fiducials_path,
                       subjects_dir=subjects_dir,
                       src=sample_src)
        mlab.close(all=True)
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({
        'EEG 001': 'ecog',
        'EEG 002': 'seeg'
    })
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        plot_alignment(evoked_eeg_ecog_seeg.info,
                       subject='sample',
                       trans=trans_fname,
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'],
                       ecog=True,
                       seeg=True)
    mlab.close(all=True)

    sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info,
        subject='sample',
        eeg='projected',
        meg='helmet',
        bem=sphere,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg='helmet',
                   subjects_dir=subjects_dir,
                   eeg='projected',
                   bem=sphere,
                   surfaces=['head', 'brain'],
                   src=sample_src)
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=[],
                   subjects_dir=subjects_dir,
                   bem=bem_sol,
                   eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'],
                   bem=bem_surfs)
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info,
        eeg='projected',
        meg='helmet',
        bem=sphere,
        src=src,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=False,
                   coord_frame='mri',
                   subjects_dir=subjects_dir,
                   surfaces=['brain'],
                   bem=sphere,
                   show_axes=True)

    # one layer bem with skull surfaces:
    pytest.raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['brain', 'head', 'inner_skull'],
                  bem=sphere)
    # wrong eeg value:
    pytest.raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  eeg='foo')
    # wrong meg value:
    pytest.raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  meg='bar')
    # multiple brain surfaces:
    pytest.raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['white', 'pial'])
    pytest.raises(TypeError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=[1])
    pytest.raises(ValueError,
                  plot_alignment,
                  info=info,
                  trans=trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  surfaces=['foo'])
    mlab.close(all=True)
Example #39
def test_make_lcmv(tmpdir):
    """Test LCMV with evoked data and single trials."""
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()

    for fwd in [forward, forward_vol]:
        filters = make_lcmv(evoked.info, fwd, data_cov, reg=0.01,
                            noise_cov=noise_cov)
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc.crop(0.02, None)

        stc_pow = np.sum(np.abs(stc.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert 0.09 < tmax < 0.12, tmax
        assert 0.9 < np.max(max_stc) < 3., np.max(max_stc)

        if fwd is forward:
            # Test picking normal orientation (surface source space only).
            filters = make_lcmv(evoked.info, forward_surf_ori, data_cov,
                                reg=0.01, noise_cov=noise_cov,
                                pick_ori='normal', weight_norm=None)
            stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed')
            stc_normal.crop(0.02, None)

            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
            idx = np.argmax(stc_pow)
            max_stc = stc_normal.data[idx]
            tmax = stc_normal.times[np.argmax(max_stc)]

            assert 0.04 < tmax < 0.13, tmax
            assert 3e-7 < np.max(max_stc) < 5e-7, np.max(max_stc)

            # No weight normalization was applied, so the amplitude of normal
            # orientation results should always be smaller than free
            # orientation results.
            assert (np.abs(stc_normal.data) <= stc.data).all()

        # Test picking source orientation maximizing output source power
        filters = make_lcmv(evoked.info, fwd, data_cov, reg=0.01,
                            noise_cov=noise_cov, pick_ori='max-power')
        stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_max_power.crop(0.02, None)
        stc_pow = np.sum(np.abs(stc_max_power.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = np.abs(stc_max_power.data[idx])
        tmax = stc.times[np.argmax(max_stc)]

        assert 0.08 < tmax < 0.12, tmax
        assert 0.8 < np.max(max_stc) < 3., np.max(max_stc)

        stc_max_power.data[:, :] = np.abs(stc_max_power.data)

        if fwd is forward:
            # Maximum output source power orientation results should be
            # similar to free orientation results in areas with channel
            # coverage
            label = mne.read_label(fname_label)
            mean_stc = stc.extract_label_time_course(label, fwd['src'],
                                                     mode='mean')
            mean_stc_max_pow = \
                stc_max_power.extract_label_time_course(label, fwd['src'],
                                                        mode='mean')
            assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 0.6)

        # Test NAI weight normalization:
        filters = make_lcmv(evoked.info, fwd, data_cov, reg=0.01,
                            noise_cov=noise_cov, pick_ori='max-power',
                            weight_norm='nai')
        stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_nai.crop(0.02, None)

        # Test whether unit-noise-gain solution is a scaled version of NAI
        pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)),
                                  np.concatenate(stc_max_power.data))
        assert_almost_equal(pearsoncorr[0, 1], 1.)

    # Test sphere head model with unit-noise gain beamformer and orientation
    # selection and rank reduction of the leadfield
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None, mindist=5.0, exclude=2.0)

    fwd_sphere = mne.make_forward_solution(evoked.info, trans=None, src=src,
                                           bem=sphere, eeg=False, meg=True)

    # Test that we get an error if not reducing rank
    with pytest.raises(ValueError):  # Singular matrix or complex spectrum
        make_lcmv(
            evoked.info, fwd_sphere, data_cov, reg=0.1,
            noise_cov=noise_cov, weight_norm='unit-noise-gain',
            pick_ori='max-power', reduce_rank=False, rank='full')

    # Now let's reduce it
    filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1,
                        noise_cov=noise_cov, weight_norm='unit-noise-gain',
                        pick_ori='max-power', reduce_rank=True)
    stc_sphere = apply_lcmv(evoked, filters, max_ori_out='signed')
    stc_sphere = np.abs(stc_sphere)
    stc_sphere.crop(0.02, None)

    stc_pow = np.sum(stc_sphere.data, axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc_sphere.data[idx]
    tmax = stc_sphere.times[np.argmax(max_stc)]

    assert 0.08 < tmax < 0.15, tmax
    assert 0.4 < np.max(max_stc) < 2., np.max(max_stc)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # __repr__
    assert 'LCMV' in repr(filters)
    assert 'unknown subject' not in repr(filters)
    assert '484' in repr(filters)
    assert '20' in repr(filters)
    assert 'rank 17' in repr(filters)

    # I/O
    fname = op.join(str(tmpdir), 'filters.h5')
    with pytest.warns(RuntimeWarning, match='-lcmv.h5'):
        filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    # deal with object_diff strictness
    filters_read['rank'] = int(filters_read['rank'])
    filters['rank'] = int(filters['rank'])
    assert object_diff(filters, filters_read) == ''

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov,
                  reg=0.01, noise_cov=noise_cov, pick_ori='normal')
    pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov,
                  reg=0.01, noise_cov=noise_cov, pick_ori='max-power')

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    pytest.raises(ValueError, make_lcmv, evoked.info, forward, data_cov,
                  reg=0.01, noise_cov=noise_cov, pick_ori='normal')

    # Test if volume forward operator is detected when picking normal
    # orientation
    pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, data_cov,
                  reg=0.01, noise_cov=noise_cov, pick_ori='normal')

    # Test if missing of noise covariance matrix is detected when more than
    # one channel type is present in the data
    pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol,
                  data_cov=data_cov, reg=0.01, noise_cov=None,
                  pick_ori='max-power')

    # Test if wrong channel selection is detected in application of filter
    evoked_ch = deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[1:])
    filters = make_lcmv(evoked.info, forward_vol, data_cov, reg=0.01,
                        noise_cov=noise_cov)
    pytest.raises(ValueError, apply_lcmv, evoked_ch, filters,
                  max_ori_out='signed')

    # Test if discrepancies in channel selection of data and fwd model are
    # handled correctly in apply_lcmv
    # make filter with data where first channel was removed
    filters = make_lcmv(evoked_ch.info, forward_vol, data_cov, reg=0.01,
                        noise_cov=noise_cov)
    # applying that filter to the full data set should automatically exclude
    # this channel from the data
    # also test here that no warnings are thrown - implemented to check whether
    # src should not be None warning occurs
    with pytest.warns(None) as w:
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert len(w) == 0
    # the result should be equal to applying this filter to a dataset without
    # this channel:
    stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed')
    assert_array_almost_equal(stc.data, stc_ch.data)

    # Test if non-matching SSP projection is detected in application of filter
    raw_proj = deepcopy(raw)
    raw_proj.del_proj()
    pytest.raises(ValueError, apply_lcmv_raw, raw_proj, filters,
                  max_ori_out='signed')

    # Test if setting reduce_rank to True returns a NotImplementedError
    # when no orientation selection is done or pick_ori='normal'
    pytest.raises(NotImplementedError, make_lcmv, evoked.info, forward_vol,
                  data_cov, noise_cov=noise_cov, pick_ori=None,
                  weight_norm='nai', reduce_rank=True)
    pytest.raises(NotImplementedError, make_lcmv, evoked.info,
                  forward_surf_ori, data_cov, noise_cov=noise_cov,
                  pick_ori='normal', weight_norm='nai', reduce_rank=True)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # check whether a filters object without src_type throws expected warning
    del filters['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
                      'src_type'):
        apply_lcmv(evoked, filters, max_ori_out='signed')

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01,
                        noise_cov=noise_cov)
    stcs = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')
    stcs_ = apply_lcmv_epochs(epochs, filters, return_generator=True,
                              max_ori_out='signed')
    assert_array_equal(stcs[0].data, next(stcs_).data)

    epochs.drop_bad()
    assert (len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    filters = make_lcmv(evoked.info, forward_fixed, data_cov, reg=0.01,
                        noise_cov=noise_cov)
    stc_fixed = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01,
                        noise_cov=noise_cov, label=label)
    stcs_label = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)

    # Test condition where the filters weights are zero. There should not be
    # any divide-by-zero errors
    zero_cov = data_cov.copy()
    zero_cov['data'][:] = 0
    filters = make_lcmv(epochs.info, forward_fixed, zero_cov, reg=0.01,
                        noise_cov=noise_cov)
    assert_array_equal(filters['weights'], 0)
Example #40
def test_volume_source_morph_round_trip(tmp_path, subject_from, subject_to,
                                        lower, upper, dtype, morph_mat,
                                        monkeypatch):
    """Test volume source estimate morph round-trips well."""
    import nibabel as nib
    from nibabel.processing import resample_from_to
    src = dict()
    if morph_mat:
        # ~1.5 minutes with pos=7. (4157 morphs!) for sample, so only test
        # morph_mat computation mode with a few labels
        label_names = sorted(get_volume_labels_from_aseg(fname_aseg))[1:2]
        if 'sample' in (subject_from, subject_to):
            src['sample'] = setup_volume_source_space(
                'sample',
                subjects_dir=subjects_dir,
                volume_label=label_names,
                mri=fname_aseg)
            assert sum(s['nuse'] for s in src['sample']) == 12
        if 'fsaverage' in (subject_from, subject_to):
            src['fsaverage'] = setup_volume_source_space(
                'fsaverage',
                subjects_dir=subjects_dir,
                volume_label=label_names[:3],
                mri=fname_aseg_fs)
            assert sum(s['nuse'] for s in src['fsaverage']) == 16
    else:
        assert not morph_mat
        if 'sample' in (subject_from, subject_to):
            src['sample'] = mne.read_source_spaces(fname_vol)
            src['sample'][0]['subject_his_id'] = 'sample'
            assert src['sample'][0]['nuse'] == 4157
        if 'fsaverage' in (subject_from, subject_to):
            # Created to save space with:
            #
            # bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage',
            #               'fsaverage-inner_skull-bem.fif')
            # src_fsaverage = mne.setup_volume_source_space(
            #     'fsaverage', pos=7., bem=bem, mindist=0,
            #     subjects_dir=subjects_dir, add_interpolator=False)
            # mne.write_source_spaces(fname_fs_vol, src_fsaverage,
            #                         overwrite=True)
            #
            # For speed we do it without the interpolator because it's huge.
            src['fsaverage'] = mne.read_source_spaces(fname_fs_vol)
            src['fsaverage'][0].update(vol_dims=np.array([23, 29, 25]),
                                       seg_name='brain')
            _add_interpolator(src['fsaverage'])
            assert src['fsaverage'][0]['nuse'] == 6379
    src_to, src_from = src[subject_to], src[subject_from]
    del src
    # No SDR just for speed once everything works
    kwargs = dict(niter_sdr=(),
                  niter_affine=(1, ),
                  subjects_dir=subjects_dir,
                  verbose=True)
    morph_from_to = compute_source_morph(src=src_from,
                                         src_to=src_to,
                                         subject_to=subject_to,
                                         **kwargs)
    morph_to_from = compute_source_morph(src=src_to,
                                         src_to=src_from,
                                         subject_to=subject_from,
                                         **kwargs)
    nuse = sum(s['nuse'] for s in src_from)
    assert nuse > 10
    use = np.linspace(0, nuse - 1, 10).round().astype(int)
    data = np.eye(nuse)[:, use]
    if dtype is complex:
        data = data * 1j
    vertices = [s['vertno'] for s in src_from]
    stc_from = VolSourceEstimate(data, vertices, 0, 1)
    with catch_logging() as log:
        stc_from_rt = morph_to_from.apply(
            morph_from_to.apply(stc_from, verbose='debug'))
    log = log.getvalue()
    assert 'individual volume morph' in log
    maxs = np.argmax(stc_from_rt.data, axis=0)
    src_rr = np.concatenate([s['rr'][s['vertno']] for s in src_from])
    dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1)
    mu = np.mean(dists)
    # fsaverage=5.99; 7.97 without additional src_ras_t fix
    # fsaverage=7.97; 25.4 without src_ras_t fix
    assert lower <= mu < upper, f'round-trip distance {mu}'
    # check that pre_affine is close to identity when subject_to==subject_from
    if subject_to == subject_from:
        for morph in (morph_to_from, morph_from_to):
            assert_allclose(morph.pre_affine.affine, np.eye(4), atol=1e-2)
    # check that power is more or less preserved (labelizing messes with this)
    if morph_mat:
        if subject_to == 'fsaverage':
            limits = (18, 18.5)
        else:
            limits = (7, 7.5)
    else:
        limits = (1, 1.2)
    stc_from_unit = stc_from.copy().crop(0, 0)
    stc_from_unit._data.fill(1.)
    stc_from_unit_rt = morph_to_from.apply(morph_from_to.apply(stc_from_unit))
    assert_power_preserved(stc_from_unit, stc_from_unit_rt, limits=limits)
    if morph_mat:
        fname = tmp_path / 'temp-morph.h5'
        morph_to_from.save(fname)
        morph_to_from = read_source_morph(fname)
        assert morph_to_from.vol_morph_mat is None
        morph_to_from.compute_vol_morph_mat(verbose=True)
        morph_to_from.save(fname, overwrite=True)
        morph_to_from = read_source_morph(fname)
        assert isinstance(morph_to_from.vol_morph_mat, csr_matrix), 'csr'
        # equivalence (plus automatic calling)
        assert morph_from_to.vol_morph_mat is None
        monkeypatch.setattr(mne.morph, '_VOL_MAT_CHECK_RATIO', 0.)
        with catch_logging() as log:
            with pytest.warns(RuntimeWarning, match=r'calling morph\.compute'):
                stc_from_rt_lin = morph_to_from.apply(
                    morph_from_to.apply(stc_from, verbose='debug'))
        assert isinstance(morph_from_to.vol_morph_mat, csr_matrix), 'csr'
        log = log.getvalue()
        assert 'sparse volume morph matrix' in log
        assert_allclose(stc_from_rt.data, stc_from_rt_lin.data)
        del stc_from_rt_lin
        stc_from_unit_rt_lin = morph_to_from.apply(
            morph_from_to.apply(stc_from_unit))
        assert_allclose(stc_from_unit_rt.data, stc_from_unit_rt_lin.data)
        del stc_from_unit_rt_lin
    del stc_from, stc_from_rt
    # before and after morph, check the proportion of vertices
    # that are inside and outside the brainmask.mgz
    brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz'))
    mask = _get_img_fdata(brain) > 0
    if subject_from == subject_to == 'sample':
        for stc in [stc_from_unit, stc_from_unit_rt]:
            img = stc.as_volume(src_from, mri_resolution=True)
            img = nib.Nifti1Image(  # abs to convert complex
                np.abs(_get_img_fdata(img)[:, :, :, 0]), img.affine)
            img = _get_img_fdata(resample_from_to(img, brain, order=1))
            assert img.shape == mask.shape
            in_ = img[mask].astype(bool).mean()
            out = img[~mask].astype(bool).mean()
            if morph_mat:
                out_max = 0.001
                in_min, in_max = 0.005, 0.007
            else:
                out_max = 0.02
                in_min, in_max = 0.97, 0.98
            assert out < out_max, f'proportion out of volume {out}'
            assert in_min < in_ < in_max, f'proportion inside volume {in_}'
Example #41
    mne.viz.plot_alignment(info,
                           trans,
                           subject=mri,
                           dig='fiducials',
                           meg=['helmet', 'sensors'],
                           eeg=False,
                           subjects_dir=mri_dir,
                           surfaces='head-dense',
                           bem=bem,
                           src=src)
    del src

    # build the volume source space for the subjects
    vol_src = mne.setup_volume_source_space(subject=mri,
                                            pos=5.0,
                                            mri=None,
                                            bem=bem,
                                            surface=None,
                                            mindist=5.0,
                                            exclude=0.0,
                                            subjects_dir=mri_dir)
    print(vol_src)
    mne.viz.plot_bem(subject=mri,
                     subjects_dir=mri_dir,
                     brain_surfaces='white',
                     src=vol_src,
                     orientation='coronal')
    # save the volume source space
    vol_src.save("{}{}_vol-src.fif".format(meg_dir, meg), overwrite=True)
    del vol_src
Example #42
mne.viz.plot_bem(subject=subject,
                 subjects_dir=subjects_dir,
                 brain_surfaces='white',
                 src=src,
                 orientation='coronal')

###############################################################################
# To compute a volume based source space defined with a grid of candidate
# dipoles inside a sphere of radius 90 mm centered at (0.0, 0.0, 40.0) mm
# you can use the following code.
# Obviously here, the sphere is not perfect. It is not restricted to the
# brain and it can miss some parts of the cortex.

sphere = (0.0, 0.0, 40.0, 90.0)
vol_src = mne.setup_volume_source_space(subject,
                                        subjects_dir=subjects_dir,
                                        sphere=sphere)
print(vol_src)

mne.viz.plot_bem(subject=subject,
                 subjects_dir=subjects_dir,
                 brain_surfaces='white',
                 src=vol_src,
                 orientation='coronal')

###############################################################################
# To compute a volume based source space defined with a grid of candidate
# dipoles inside the brain (requires the :term:`BEM` surfaces) you can use the
# following.

surface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
Example #43
# hemisphere (258 locations) and one for the right hemisphere (258
# locations). Sources can be visualized on top of the BEM surfaces in purple.

mne.viz.plot_bem(src=src, **plot_bem_kwargs)

# %%
# To compute a volume based source space defined with a grid of candidate
# dipoles inside a sphere of radius 90mm centered at (0.0, 0.0, 40.0) mm
# you can use the following code.
# Obviously here, the sphere is not perfect. It is not restricted to the
# brain and it can miss some parts of the cortex.

sphere = (0.0, 0.0, 0.04, 0.09)
vol_src = mne.setup_volume_source_space(
    subject,
    subjects_dir=subjects_dir,
    sphere=sphere,
    sphere_units='m',
    add_interpolator=False)  # just for speed!
print(vol_src)

mne.viz.plot_bem(src=vol_src, **plot_bem_kwargs)

# %%
# To compute a volume based source space defined with a grid of candidate
# dipoles inside the brain (requires the :term:`BEM` surfaces) you can use the
# following.

surface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
vol_src = mne.setup_volume_source_space(
    subject,
    subjects_dir=subjects_dir,
    surface=surface,
    add_interpolator=False)  # just for speed!

###############################################################################
# Using an infinite reference (REST)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To use the "point at infinity" reference technique described in
# :footcite:`Yao2001` requires a forward model, which we can create in a few
# steps. Here we use a fairly large spacing of vertices (``pos`` = 15 mm) to
# reduce computation time; a 5 mm spacing is more typical for real data
# analysis:

raw.del_proj()  # remove our average reference projector first
sphere = mne.make_sphere_model('auto', 'auto', raw.info)
src = mne.setup_volume_source_space(sphere=sphere, exclude=30., pos=15.)
forward = mne.make_forward_solution(raw.info, trans=None, src=src, bem=sphere)
raw_rest = raw.copy().set_eeg_reference('REST', forward=forward)

for title, _raw in zip(['Original', 'REST (∞)'], [raw, raw_rest]):
    fig = _raw.plot(n_channels=len(raw), scalings=dict(eeg=5e-5))
    # make room for title
    fig.subplots_adjust(top=0.9)
    fig.suptitle('{} reference'.format(title), size='xx-large', weight='bold')

###############################################################################
# EEG reference and source modeling
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you plan to perform source modeling (either with EEG or combined EEG/MEG
# data), it is **strongly recommended** to use the
import mne
import sys

if len(sys.argv) > 1:
    subj = sys.argv[1]
else:
    subj = 'ABUTRIKQ'

freesurfer_dir = '/mnt/shaw/MEG_structural/freesurfer/%s/' % subj
data_dir = '/mnt/shaw/MEG_data/analysis/stop/parsed_red/'
fwd_dir = '/mnt/shaw/MEG_data/analysis/stop/'
evoked_fname = data_dir + '%s_stop_parsed_matched_BP1-100_DS300-ave.fif' % subj

src = mne.setup_volume_source_space(subj,
                                    mri=freesurfer_dir + '/mri/brainmask.mgz',
                                    bem=freesurfer_dir +
                                    '/bem/%s-5120-bem.fif' % subj,
                                    pos=5)

evoked = mne.read_evokeds(evoked_fname)
fwd_fname = '%s/%s_task-vol-5-fwd.fif' % (fwd_dir, subj)
trans_fname = '%s/%s-trans.fif' % (fwd_dir, subj)
fwd = mne.make_forward_solution(evoked[0].info,
                                trans_fname,
                                src,
                                freesurfer_dir +
                                '/bem/%s-5120-bem-sol.fif' % subj,
                                fname=fwd_fname,
                                meg=True,
                                eeg=False,
                                n_jobs=1,
Example #46
def test_plot_alignment(tmpdir, renderer, mixed_fwd_cov_evoked):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{
        'coord_frame': 5,
        'ident': 1,
        'kind': 1,
        'r': [-0.08061612, -0.02908875, -0.04131077]
    }, {
        'coord_frame': 5,
        'ident': 2,
        'kind': 1,
        'r': [0.00146763, 0.08506715, -0.03483611]
    }, {
        'coord_frame': 5,
        'ident': 3,
        'kind': 1,
        'r': [0.08436285, -0.02850276, -0.04127743]
    }]
    write_dig(fiducials_path, fid, 5)

    renderer.backend._close_all()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname,
                       config_fname,
                       hs_fname,
                       convert=True,
                       preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        fig = plot_alignment(info,
                             read_trans(trans_fname),
                             subject='sample',
                             subjects_dir=subjects_dir,
                             meg=meg)
        rend = renderer.backend._Renderer(fig=fig)
        rend.close()
    # KIT ref sensor coil def is defined
    renderer.backend._close_all()
    info = infos['Neuromag']
    pytest.raises(TypeError,
                  plot_alignment,
                  'foo',
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir)
    pytest.raises(OSError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  src='foo')
    pytest.raises(ValueError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='fsaverage',
                  subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir,
                    head=True,
                    skull=True,
                    brain='white')
    # mixed source space
    mixed_src = mixed_fwd_cov_evoked[0]['src']
    assert mixed_src.kind == 'mixed'
    plot_alignment(info,
                   meg=['helmet', 'sensors'],
                   dig=True,
                   coord_frame='head',
                   trans=Path(trans_fname),
                   subject='sample',
                   mri_fiducials=fiducials_path,
                   subjects_dir=subjects_dir,
                   src=mixed_src)
    renderer.backend._close_all()
    # no-head version
    renderer.backend._close_all()
    # all coord frames
    plot_alignment(info)  # works: surfaces='auto' default
    for coord_frame in ('meg', 'head', 'mri'):
        fig = plot_alignment(info,
                             meg=['helmet', 'sensors'],
                             dig=True,
                             coord_frame=coord_frame,
                             trans=Path(trans_fname),
                             subject='sample',
                             mri_fiducials=fiducials_path,
                             subjects_dir=subjects_dir,
                             src=src_fname)
    renderer.backend._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({
        'EEG 001': 'ecog',
        'EEG 002': 'seeg'
    })
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        with catch_logging() as log:
            plot_alignment(evoked_eeg_ecog_seeg.info,
                           subject='sample',
                           trans=trans_fname,
                           subjects_dir=subjects_dir,
                           surfaces=['white', 'outer_skin', 'outer_skull'],
                           meg=['helmet', 'sensors'],
                           eeg=['original', 'projected'],
                           ecog=True,
                           seeg=True,
                           verbose=True)
    log = log.getvalue()
    assert '1 ECoG location' in log
    assert '1 sEEG location' in log
    renderer.backend._close_all()

    sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info,
        subject='sample',
        eeg='projected',
        meg='helmet',
        bem=sphere,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg='helmet',
                   subjects_dir=subjects_dir,
                   eeg='projected',
                   bem=sphere,
                   surfaces=['head', 'brain'],
                   src=sample_src)
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=[],
                   subjects_dir=subjects_dir,
                   bem=bem_sol,
                   eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'],
                   bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info,
                           trans_fname,
                           subject='sample',
                           meg=True,
                           subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'],
                           bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert 'not find the surface for head in the provided BEM model' in log
    # sphere model
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info,
        eeg='projected',
        meg='helmet',
        bem=sphere,
        src=src,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    # if you ask for a brain surface with a 1-layer sphere model it's an error
    with pytest.raises(RuntimeError, match='Sphere model does not have'):
        fig = plot_alignment(subject='sample',
                             subjects_dir=subjects_dir,
                             surfaces=['brain'],
                             bem=sphere)
    # but you can ask for a specific brain surface, and
    # no info is permitted
    fig = plot_alignment(trans=trans_fname,
                         subject='sample',
                         meg=False,
                         coord_frame='mri',
                         subjects_dir=subjects_dir,
                         surfaces=['white'],
                         bem=sphere,
                         show_axes=True)
    renderer.backend._close_all()
    if renderer._get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)
    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, [0])
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    with use_coil_def(coil_def_fname):
        plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)

    # one layer bem with skull surfaces:
    with pytest.raises(RuntimeError, match='Sphere model does not.*boundary'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'],
                       bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='Invalid value for the .eeg'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='Invalid value for the .meg'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError, match='Only one brain surface can be plot'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='surfaces.*must be'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['foo'])
    with pytest.raises(TypeError, match="must be an instance of "):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=dict(brain='super clear'))
    with pytest.raises(ValueError, match="must be between 0 and 1"):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=dict(brain=42))
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')
    # surfaces as dict
    plot_alignment(subject='sample',
                   coord_frame='head',
                   subjects_dir=subjects_dir,
                   surfaces={
                       'white': 0.4,
                       'outer_skull': 0.6,
                       'head': None
                   })
    # fNIRS (default is pairs)
    info = read_raw_nirx(nirx_fname).info
    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
    log = log.getvalue()
    assert '26 fNIRS pairs' in log
    assert '26 fNIRS locations' not in log
    assert '26 fNIRS sources' not in log
    assert '26 fNIRS detectors' not in log

    with catch_logging() as log:
        plot_alignment(info,
                       subject='fsaverage',
                       surfaces=(),
                       verbose=True,
                       fnirs=['channels', 'sources', 'detectors'])
    log = log.getvalue()
    assert '26 fNIRS pairs' not in log
    assert '26 fNIRS locations' in log
    assert '26 fNIRS sources' in log
    assert '26 fNIRS detectors' in log

    renderer.backend._close_all()
def test_combine_source_spaces(tmpdir):
    """Test combining source spaces."""
    import nibabel as nib
    rng = np.random.RandomState(2)
    volume_labels = ['Brain-Stem', 'Right-Hippocampus']  # two fairly large

    # create a sparse surface source space to ensure all get mapped
    # when mri_resolution=False
    srf = setup_source_space('sample',
                             'oct3',
                             add_dist=False,
                             subjects_dir=subjects_dir)

    # setup 2 volume source spaces
    vol = setup_volume_source_space('sample',
                                    subjects_dir=subjects_dir,
                                    volume_label=volume_labels[0],
                                    mri=aseg_fname,
                                    add_interpolator=False)

    # setup a discrete source space
    rr = rng.randint(0, 11, (20, 3)) * 5e-3
    nn = np.zeros(rr.shape)
    nn[:, -1] = 1
    pos = {'rr': rr, 'nn': nn}
    disc = setup_volume_source_space('sample',
                                     subjects_dir=subjects_dir,
                                     pos=pos,
                                     verbose='error')

    # combine source spaces
    assert srf.kind == 'surface'
    assert vol.kind == 'volume'
    assert disc.kind == 'discrete'
    src = srf + vol + disc
    assert src.kind == 'mixed'
    assert srf.kind == 'surface'
    assert vol.kind == 'volume'
    assert disc.kind == 'discrete'

    # test addition of source spaces
    assert len(src) == 4

    # test reading and writing
    src_out_name = tmpdir.join('temp-src.fif')
    src.save(src_out_name)
    src_from_file = read_source_spaces(src_out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
    assert_equal(repr(src), repr(src_from_file))
    assert_equal(src.kind, 'mixed')

    # test that all source spaces are in MRI coordinates
    coord_frames = np.array([s['coord_frame'] for s in src])
    assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()

    # test errors for export_volume
    image_fname = tmpdir.join('temp-image.mgz')

    # source spaces with no volume
    with pytest.raises(ValueError, match='at least one volume'):
        srf.export_volume(image_fname, verbose='error')

    # unrecognized source type
    disc2 = disc.copy()
    disc2[0]['type'] = 'kitty'
    with pytest.raises(ValueError, match='Invalid value'):
        src + disc2
    del disc2

    # unrecognized file type
    bad_image_fname = tmpdir.join('temp-image.png')
    # vertices outside vol space warning
    pytest.raises(ValueError,
                  src.export_volume,
                  bad_image_fname,
                  verbose='error')

    # mixed coordinate frames
    disc3 = disc.copy()
    disc3[0]['coord_frame'] = 10
    src_mixed_coord = src + disc3
    with pytest.raises(ValueError, match='must be in head coordinates'):
        src_mixed_coord.export_volume(image_fname, verbose='error')

    # now actually write it
    fname_img = tmpdir.join('img.nii')
    for mri_resolution in (False, 'sparse', True):
        for src, up in ((vol, 705), (srf + vol, 27272), (disc + vol, 705)):
            src.export_volume(fname_img,
                              use_lut=False,
                              mri_resolution=mri_resolution,
                              overwrite=True)
            img_data = _get_img_fdata(nib.load(str(fname_img)))
            n_src = img_data.astype(bool).sum()
            n_want = sum(s['nuse'] for s in src)
            if mri_resolution is True:
                n_want += up
            assert n_src == n_want, src

    # gh-8004
    temp_aseg = tmpdir.join('aseg.mgz')
    aseg_img = nib.load(aseg_fname)
    aseg_affine = aseg_img.affine
    aseg_affine[:3, :3] *= 0.7
    new_aseg = nib.MGHImage(aseg_img.dataobj, aseg_affine)
    nib.save(new_aseg, str(temp_aseg))
    lh_cereb = mne.setup_volume_source_space(
        "sample",
        mri=temp_aseg,
        volume_label="Left-Cerebellum-Cortex",
        add_interpolator=False,
        subjects_dir=subjects_dir)
    src = srf + lh_cereb
    with pytest.warns(RuntimeWarning, match='2 surf vertices lay outside'):
        src.export_volume(image_fname, mri_resolution="sparse", overwrite=True)
def test_scale_mri():
    """Test creating fsaverage and scaling it."""
    # create fsaverage using the testing "fsaverage" instead of the FreeSurfer
    # one
    tempdir = _TempDir()
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir,
                           fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True,
                           subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage',
                                 'ico0',
                                 subjects_dir=tempdir,
                                 add_dist=False)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space('fsaverage',
                                         pos=50,
                                         mri=mri,
                                         subjects_dir=tempdir,
                                         add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage
    for scale in (.9, [1, .2, .8]):
        write_source_spaces(path % 'ico-0', src, overwrite=True)
        os.environ['_MNE_FEW_SURFACES'] = 'true'
        with pytest.warns(None):  # sometimes missing nibabel
            scale_mri('fsaverage',
                      'flachkopf',
                      scale,
                      True,
                      subjects_dir=tempdir,
                      verbose='debug')
        del os.environ['_MNE_FEW_SURFACES']
        assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
        spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')

        assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
        assert os.path.isfile(
            os.path.join(tempdir, 'flachkopf', 'surf', 'lh.sphere.reg'))
        vsrc_s = mne.read_source_spaces(spath % 'vol-50')
        pt = np.array([0.12, 0.41, -0.22])
        assert_array_almost_equal(
            apply_trans(vsrc_s[0]['src_mri_t'], pt * np.array(scale)),
            apply_trans(vsrc[0]['src_mri_t'], pt))
        scale_labels('flachkopf', subjects_dir=tempdir)

        # add distances to source space after hacking the properties to make
        # it run *much* faster
        src_dist = src.copy()
        for s in src_dist:
            s.update(rr=s['rr'][s['vertno']],
                     nn=s['nn'][s['vertno']],
                     tris=s['use_tris'])
            s.update(np=len(s['rr']),
                     ntri=len(s['tris']),
                     vertno=np.arange(len(s['rr'])),
                     inuse=np.ones(len(s['rr']), int))
        mne.add_source_space_distances(src_dist)
        write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

        # scale with distances
        os.remove(spath % 'ico-0')
        scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
        ssrc = mne.read_source_spaces(spath % 'ico-0')
        assert ssrc[0]['dist'] is not None
def test_iterable():
    """Test iterable support for simulate_raw."""
    raw = read_raw_fif(raw_fname_short).load_data()
    raw.pick_channels(raw.ch_names[:10] + ['STI 014'])
    src = setup_volume_source_space(
        pos=dict(rr=[[-0.05, 0, 0], [0.1, 0, 0]],
                 nn=[[0, 1., 0], [0, 1., 0]]))
    assert src.kind == 'discrete'
    trans = None
    sphere = make_sphere_model(head_radius=None, info=raw.info)
    tstep = 1. / raw.info['sfreq']
    rng = np.random.RandomState(0)
    vertices = np.array([1])
    data = rng.randn(1, 2)
    stc = VolSourceEstimate(data, vertices, 0, tstep)
    assert isinstance(stc.vertices, np.ndarray)
    with pytest.raises(ValueError, match='at least three time points'):
        simulate_raw(raw.info, stc, trans, src, sphere, None)
    data = rng.randn(1, 1000)
    n_events = (len(raw.times) - 1) // 1000 + 1
    stc = VolSourceEstimate(data, vertices, 0, tstep)
    assert isinstance(stc.vertices, np.ndarray)
    raw_sim = simulate_raw(raw.info, [stc] * 15, trans, src, sphere, None,
                           first_samp=raw.first_samp)
    raw_sim.crop(0, raw.times[-1])
    assert_allclose(raw.times, raw_sim.times)
    events = find_events(raw_sim, initial_event=True)
    assert len(events) == n_events
    assert_array_equal(events[:, 2], 1)

    # Degenerate STCs
    with pytest.raises(RuntimeError,
                       match=r'Iterable did not provide stc\[0\]'):
        simulate_raw(raw.info, [], trans, src, sphere, None)
    # tuple with ndarray
    event_data = np.zeros(len(stc.times), int)
    event_data[0] = 3
    raw_new = simulate_raw(raw.info, [(stc, event_data)] * 15,
                           trans, src, sphere, None, first_samp=raw.first_samp)
    assert raw_new.n_times == 15000
    raw_new.crop(0, raw.times[-1])
    _assert_iter_sim(raw_sim, raw_new, 3)
    with pytest.raises(ValueError, match='event data had shape .* but need'):
        simulate_raw(raw.info, [(stc, event_data[:-1])], trans, src, sphere,
                     None)
    with pytest.raises(ValueError, match='stim_data in a stc tuple .* int'):
        simulate_raw(raw.info, [(stc, event_data * 1.)], trans, src, sphere,
                     None)

    # iterable
    def stc_iter():
        stim_data = np.zeros(len(stc.times), int)
        stim_data[0] = 4
        ii = 0
        while ii < 15:
            ii += 1
            yield (stc, stim_data)
    raw_new = simulate_raw(raw.info, stc_iter(), trans, src, sphere, None,
                           first_samp=raw.first_samp)
    raw_new.crop(0, raw.times[-1])
    _assert_iter_sim(raw_sim, raw_new, 4)

    def stc_iter_bad():
        ii = 0
        while ii < 100:
            ii += 1
            yield (stc, 4, 3)
    with pytest.raises(ValueError, match='stc, if tuple, must be length'):
        simulate_raw(raw.info, stc_iter_bad(), trans, src, sphere, None)
    _assert_iter_sim(raw_sim, raw_new, 4)

    def stc_iter_bad():
        ii = 0
        while ii < 100:
            ii += 1
            stc_new = stc.copy()
            stc_new.vertices = np.array([ii % 2])
            yield stc_new
    with pytest.raises(RuntimeError, match=r'Vertex mismatch for stc\[1\]'):
        simulate_raw(raw.info, stc_iter_bad(), trans, src, sphere, None)

    # Forward omission
    vertices = np.array([0, 1])
    data = rng.randn(2, 1000)
    stc = VolSourceEstimate(data, vertices, 0, tstep)
    assert isinstance(stc.vertices, np.ndarray)
    # XXX eventually we should support filtering based on sphere radius, too,
    # by refactoring the code in source_space.py that does it!
    surf = _get_ico_surface(3)
    surf['rr'] *= 60  # mm
    model = _surfaces_to_bem([surf], [FIFF.FIFFV_BEM_SURF_ID_BRAIN], [0.3])
    bem = make_bem_solution(model)
    with pytest.warns(RuntimeWarning,
                      match='1 of 2 SourceEstimate vertices'):
        simulate_raw(raw.info, stc, trans, src, bem, None)
def test_gamma_map_vol_sphere():
    """Gamma MAP with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_evoked,
                          condition=0,
                          baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info, rank=None)

    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None,
                                        pos=30.,
                                        mri=None,
                                        sphere=(0.0, 0.0, 0.0, 0.08),
                                        bem=None,
                                        mindist=5.0,
                                        exclude=2.0,
                                        sphere_units='m')
    fwd = mne.make_forward_solution(info,
                                    trans=None,
                                    src=src,
                                    bem=sphere,
                                    eeg=False,
                                    meg=True)

    alpha = 0.5
    pytest.raises(ValueError,
                  gamma_map,
                  evoked,
                  fwd,
                  cov,
                  alpha,
                  loose=0,
                  return_residual=False)

    pytest.raises(ValueError,
                  gamma_map,
                  evoked,
                  fwd,
                  cov,
                  alpha,
                  loose=0.2,
                  return_residual=False)

    stc = gamma_map(evoked,
                    fwd,
                    cov,
                    alpha,
                    tol=1e-4,
                    xyz_same_gamma=False,
                    update_mode=2,
                    return_residual=False)

    assert_array_almost_equal(stc.times, evoked.times, 5)

    # Compare orientation obtained using fit_dipole and gamma_map
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=[stc.vertices[0][:1]],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd,
                                                stc,
                                                info,
                                                cov,
                                                nave=1e9,
                                                use_cps=True)

    dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True)

    amp_max = [np.max(d.amplitude) for d in dip_gmap]
    dip_gmap = dip_gmap[np.argmax(amp_max)]
    assert (dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices[0]])

    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    assert (np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99)
# volume labels of interest (the leading entries of this list are missing
# from the snippet as captured; only the visible ones are kept here)
labels_vol = ['Left-Thalamus-Proper',
              'Left-Cerebellum-Cortex',
              'Brain-Stem',
              'Right-Amygdala',
              'Right-Thalamus-Proper',
              'Right-Cerebellum-Cortex']

# Setup a surface-based source space
src = setup_source_space(subject, subjects_dir=subjects_dir,
                         spacing='oct6', add_dist=False)

# Setup a volume source space
# set pos=7.0 for speed issue
vol_src = setup_volume_source_space(subject, mri=fname_aseg,
                                    pos=7.0,
                                    bem=fname_model,
                                    volume_label=labels_vol,
                                    subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src

# compute the fwd matrix
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem,
                            mindist=5.0,  # ignore sources<=5mm from innerskull
                            meg=True, eeg=False,
                            n_jobs=1)

# Load data
raw = read_raw_fif(fname_raw, preload=True)
noise_cov = mne.read_cov(fname_cov)
events = mne.read_events(fname_event)
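
# The snippet stops after loading the data. The usual next steps (epoching and
# computing an inverse on the mixed source space) are not part of the original;
# a minimal, hedged sketch, assuming an event id of 1, could look like this:
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
                    baseline=(None, 0), preload=True)
evoked = epochs.average()
inv = mne.minimum_norm.make_inverse_operator(
    evoked.info, fwd, noise_cov, loose=dict(surface=0.2, volume=1.))
stc = mne.minimum_norm.apply_inverse(evoked, inv, method='dSPM')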
def test_combine_source_spaces():
    """Test combining source spaces
    """
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_labels = [
        label_names[int(np.random.rand() * len(label_names))]
        for ii in range(2)
    ]

    # get a surface source space (no need to test creation here)
    srf = read_source_spaces(fname, patch_stats=False)

    # setup 2 volume source spaces
    vol = setup_volume_source_space('sample',
                                    subjects_dir=subjects_dir,
                                    volume_label=volume_labels[0],
                                    mri=aseg_fname,
                                    add_interpolator=False)

    # setup a discrete source space
    rr = rng.randint(0, 20, (100, 3)) * 1e-3
    nn = np.zeros(rr.shape)
    nn[:, -1] = 1
    pos = {'rr': rr, 'nn': nn}
    disc = setup_volume_source_space('sample',
                                     subjects_dir=subjects_dir,
                                     pos=pos,
                                     verbose='error')

    # combine source spaces
    src = srf + vol + disc

    # test addition of source spaces
    assert_equal(type(src), SourceSpaces)
    assert_equal(len(src), 4)

    # test reading and writing
    src_out_name = op.join(tempdir, 'temp-src.fif')
    src.save(src_out_name)
    src_from_file = read_source_spaces(src_out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
    assert_equal(repr(src), repr(src_from_file))
    assert_equal(src.kind, 'combined')

    # test that all source spaces are in MRI coordinates
    coord_frames = np.array([s['coord_frame'] for s in src])
    assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all())

    # test errors for export_volume
    image_fname = op.join(tempdir, 'temp-image.mgz')

    # source spaces with no volume
    assert_raises(ValueError, srf.export_volume, image_fname, verbose='error')

    # unrecognized source type
    disc2 = disc.copy()
    disc2[0]['type'] = 'kitty'
    src_unrecognized = src + disc2
    assert_raises(ValueError,
                  src_unrecognized.export_volume,
                  image_fname,
                  verbose='error')

    # unrecognized file type
    bad_image_fname = op.join(tempdir, 'temp-image.png')
    with warnings.catch_warnings(record=True):  # vertices outside vol space
        assert_raises(ValueError,
                      src.export_volume,
                      bad_image_fname,
                      verbose='error')

    # mixed coordinate frames
    disc3 = disc.copy()
    disc3[0]['coord_frame'] = 10
    src_mixed_coord = src + disc3
    assert_raises(ValueError,
                  src_mixed_coord.export_volume,
                  image_fname,
                  verbose='error')
# Pick the channels of interest
raw_filt.pick_types(meg='grad')

# Re-normalize projectors after subselection
raw_filt.info.normalize_proj()

# regularized data covariance
data_cov = mne.compute_raw_covariance(raw_filt, n_jobs=n_jobs)

# beamformer requirements
bem = op.join(subjects_dir, subject, 'bem', 'genz501_17a-5120-bem-sol.fif')
sphere = mne.make_sphere_model(r0='auto', head_radius='auto',
                               info=raw_filt.info)
src = mne.setup_volume_source_space(subject='fsaverage', bem=bem,
                                    mri=op.join(subjects_dir, 'fsaverage',
                                                'mri', 'T1.mgz'),
                                    subjects_dir=subjects_dir)
fwd = mne.make_forward_solution(raw_filt.info, trans=None, src=src,
                                bem=bem, n_jobs=n_jobs)
filters = make_lcmv(raw_filt.info, fwd, data_cov, reg=0.05,
                    pick_ori='max-power', weight_norm='nai',
                    reduce_rank=True)
t0 = time.time()
stc = apply_lcmv_raw(raw_filt, filters)
print(' Time: %s mns' % round((time.time() - t0) / 60, 2))

# Save result in stc files
stc.save(op.join(datapath, subject, 'lcmv-vol'))
stc.crop(0.0, 1.0)
# plot dSPM time course in src space
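# The source-space plot referred to above is not included in the snippet; a
# minimal sketch, assuming the volume source space ``src`` created above and
# the fsaverage anatomy, could be:
stc.plot(src, subject='fsaverage', subjects_dir=subjects_dir,
         mode='stat_map', initial_time=0.5)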
                    detrend=None, on_missing='error', reject_by_annotation=True, verbose=True)
epochs.drop(badtrial, badreason)

#%% Find trial variance, flag outliers and remove trials beyond the plow/phigh percentiles
plow, phigh = 2.0, 98.0
bad_trials = my_var_cut_fn(epochs, plow, phigh, to_plot=True)
print('\n%d trials to remove from total %d trials...\nNo. of remaining trials = %d\n'
      % (len(bad_trials), len(epochs), len(epochs) - len(bad_trials)))
epochs.drop(bad_trials, reason='eye_blink and high variance', verbose=True)
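
# NOTE: ``my_var_cut_fn`` is a user-defined helper that is not included in
# this snippet. A minimal sketch of what it is assumed to do (flag trials
# whose variance falls outside the [plow, phigh] percentile range) follows:
def my_var_cut_fn(epochs, plow, phigh, to_plot=False):
    """Return indices of trials whose variance lies outside the percentiles."""
    import numpy as np
    import matplotlib.pyplot as plt
    data = epochs.get_data()           # shape (n_trials, n_channels, n_times)
    trial_var = data.var(axis=(1, 2))  # one variance value per trial
    lo, hi = np.percentile(trial_var, [plow, phigh])
    bad_idx = np.where((trial_var < lo) | (trial_var > hi))[0]
    if to_plot:
        plt.plot(trial_var, 'o')
        plt.axhline(lo, color='r')
        plt.axhline(hi, color='r')
        plt.xlabel('trial')
        plt.ylabel('variance')
        plt.show()
    return list(bad_idx)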
 
#%% Compute forward solution/leadfield
bem = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=None, info=None, verbose=True)

if 'src_vol' not in locals():
    src_vol = mne.setup_volume_source_space(subject=subject, pos=5.0, mri=mrifile, bem=None,
                                            surface=surffile, mindist=5.0, exclude=10.0,
                                            subjects_dir=subjects_dir, volume_label=None,
                                            add_interpolator=True, verbose=True)
if more_plots:
    mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir, orientation='coronal',
                     slices=range(73, 193, 5), brain_surfaces=None, src=src_vol, show=True)
    mne.viz.plot_alignment(epochs.info, trans=transfile, subject=subject, subjects_dir=subjects_dir,
                           fig=None, surfaces=['head-dense', 'inner_skull'], coord_frame='head',
                           show_axes=True, meg=False, eeg='original', dig=True, ecog=True,
                           bem=None, seeg=True, src=src_vol, mri_fiducials=False, verbose=True)

if 'fwd' not in locals():
    fwd = mne.make_forward_solution(epochs.info, trans=transfile, src=src_vol, bem=bem,
                                    meg=True, eeg=False, mindist=2.5, n_jobs=1)
if 'fwd' in locals() and len(epochs.ch_names) != fwd['nchan']:
    fwd = mne.make_forward_solution(epochs.info, trans=transfile, src=src_vol, bem=bem,
                                    meg=True, eeg=False, mindist=2.5, n_jobs=1)
                    tmax=5.,
                    baseline=None,
                    reject=dict(mag=8e-13),
                    preload=True)
del raw

##############################################################################
# Compute the forward and inverse
# -------------------------------

# This source space is really far too coarse, but we do this for speed
# considerations here
pos = 15.  # 1.5 cm is very broad, done here for speed!
src = mne.setup_volume_source_space('bst_resting',
                                    pos,
                                    bem=bem,
                                    subjects_dir=subjects_dir,
                                    verbose=True)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
data_cov = mne.compute_covariance(epochs)
filters = make_lcmv(epochs.info,
                    fwd,
                    data_cov,
                    0.05,
                    cov,
                    pick_ori='max-power',
                    weight_norm='nai')
del fwd

##############################################################################
# Compute label time series and do envelope correlation
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)

###############################################################################
# .. _plot_source_alignment_without_mri:
#
# Alignment without MRI
# ---------------------
# The surface alignments above are possible if you have the surfaces available
# from Freesurfer. :func:`mne.viz.plot_alignment` automatically searches for
# the correct surfaces from the provided ``subjects_dir``. Another option is
# to use a :ref:`spherical conductor model <eeg_sphere_model>`. It is
# passed through ``bem`` parameter.

sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
    raw.info, eeg='projected', bem=sphere, src=src, dig=True,
    surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True)

###############################################################################
# It is also possible to use :func:`mne.gui.coregistration`
# to warp a subject (usually ``fsaverage``) to subject digitization data, see
# `these slides
# <https://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
#
# .. _rotation and translation matrix: https://en.wikipedia.org/wiki/Transformation_matrix  # noqa: E501
# .. _NAS: https://en.wikipedia.org/wiki/Nasion
# .. _LPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/  # noqa:E501
# .. _RPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/  # noqa:E501
# .. _Polhemus: https://polhemus.com/scanning-digitizing/digitizing-products/
                                   tstep=1 / info['sfreq'],
                                   subject='sample')

stc_signal.save(vfname.stc_signal(noise=config.noise, vertex=config.vertex))

###############################################################################
# Create discrete source space based on voxels in volume source space
###############################################################################

if not op.exists(vfname.fwd_discrete):

    pos = {'rr': rr, 'nn': nn}

    # make discrete source space
    src_disc = mne.setup_volume_source_space(subject='sample',
                                             pos=pos,
                                             mri=None,
                                             bem=bem)

    # setup_volume_source_space sets coordinate frame to MRI
    # but coordinates we supplied are in head frame -> set correctly
    src_disc[0]['coord_frame'] = fwd['src'][0]['coord_frame']

    # np.array_equal(fwd_sel['src'][0]['rr'], fwd['src'][0]['rr'][stc.vertices]) is True
    # np.isclose(fwd_sel['src'][0]['nn'], fwd['src'][0]['nn'][stc.vertices]) is True for all entries
    fwd_disc = mne.make_forward_solution(info,
                                         trans=trans,
                                         src=src_disc,
                                         bem=bem_fname,
                                         meg=True,
                                         eeg=False)
def test_volume_source_space(tmpdir):
    """Test setting up volume source spaces."""
    src = read_source_spaces(fname_vol)
    temp_name = tmpdir.join('temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    bem_sol = read_bem_solution(fname_bem_3_sol)
    bem = read_bem_solution(fname_bem_sol)
    # The one in the testing dataset (uses bem as bounds)
    for this_bem, this_surf in zip(
        (bem, fname_bem, fname_bem_3, bem_sol, fname_bem_3_sol, None),
        (None, None, None, None, None, surf)):
        src_new = setup_volume_source_space('sample',
                                            pos=7.0,
                                            bem=this_bem,
                                            surface=this_surf,
                                            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    with pytest.raises(IOError, match='surface file.*not found'):
        setup_volume_source_space('sample',
                                  surface='foo',
                                  mri=fname_mri,
                                  subjects_dir=subjects_dir)
    bem['surfs'][-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
    with pytest.raises(ValueError, match='BEM is not in MRI coord.* got head'):
        setup_volume_source_space('sample',
                                  bem=bem,
                                  mri=fname_mri,
                                  subjects_dir=subjects_dir)
    bem['surfs'] = bem['surfs'][:-1]  # no inner skull surf
    with pytest.raises(ValueError, match='Could not get inner skul.*from BEM'):
        setup_volume_source_space('sample',
                                  bem=bem,
                                  mri=fname_mri,
                                  subjects_dir=subjects_dir)
    del bem
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.),
                               head_radius=0.1,
                               relative_radii=(0.9, 1.0),
                               sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10, sphere=(0., 0., 0., 0.09))
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    with pytest.raises(ValueError, match='sphere, if str'):
        setup_volume_source_space(sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    with pytest.raises(ValueError, match='be spherical with multiple layers'):
        setup_volume_source_space(sphere=sphere)
def test_mxne_vol_sphere():
    """Test (TF-)MxNE with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    cov = read_cov(fname_cov)

    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)

    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None,
                                        pos=15.,
                                        mri=None,
                                        sphere=(0.0, 0.0, 0.0, 0.08),
                                        bem=None,
                                        mindist=5.0,
                                        exclude=2.0,
                                        sphere_units='m')
    fwd = mne.make_forward_solution(info,
                                    trans=None,
                                    src=src,
                                    bem=sphere,
                                    eeg=False,
                                    meg=True)

    alpha = 80.

    # Computing inverse with restricted orientations should also work, since
    # we have a discrete source space.
    stc = mixed_norm(evoked_l21,
                     fwd,
                     cov,
                     alpha,
                     loose=0.2,
                     return_residual=False,
                     maxit=3,
                     tol=1e-8,
                     active_set_size=10)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)

    # irMxNE tests
    with catch_logging() as log:
        stc = mixed_norm(evoked_l21,
                         fwd,
                         cov,
                         alpha,
                         n_mxne_iter=1,
                         maxit=30,
                         tol=1e-8,
                         active_set_size=10,
                         verbose=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert_var_exp_log(log.getvalue(), 9, 11)  # 10.2

    # Compare the orientation obtained using fit_dipole and mixed_norm
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=[stc.vertices[0][:1]],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd,
                                                stc,
                                                info,
                                                cov,
                                                nave=1e9,
                                                use_cps=True)

    dip_mxne = mixed_norm(evoked_dip,
                          fwd,
                          cov,
                          alpha=80,
                          n_mxne_iter=1,
                          maxit=30,
                          tol=1e-8,
                          active_set_size=10,
                          return_as_dipoles=True)

    amp_max = [np.max(d.amplitude) for d in dip_mxne]
    dip_mxne = dip_mxne[np.argmax(amp_max)]
    assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]

    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
    dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
    assert dist < 4.  # within 4 mm

    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion

    stc, _ = tf_mixed_norm(evoked,
                           fwd,
                           cov,
                           maxit=3,
                           tol=1e-4,
                           tstep=16,
                           wsize=32,
                           window=0.1,
                           alpha=alpha,
                           l1_ratio=l1_ratio,
                           return_residual=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked.times, 5)
def test_channel_name_limit(tmpdir, monkeypatch, fname):
    """Test that our remapping works properly."""
    #
    # raw
    #
    if fname.endswith('fif'):
        raw = read_raw_fif(fname)
        raw.pick_channels(raw.ch_names[:3])
        ref_names = []
        data_names = raw.ch_names
    else:
        assert fname.endswith('.ds')
        raw = read_raw_ctf(fname)
        ref_names = [
            raw.ch_names[pick]
            for pick in pick_types(raw.info, meg=False, ref_meg=True)
        ]
        data_names = raw.ch_names[32:35]
    proj = dict(data=np.ones((1, len(data_names))),
                col_names=data_names[:2].copy(),
                row_names=None,
                nrow=1)
    proj = Projection(data=proj,
                      active=False,
                      desc='test',
                      kind=0,
                      explained_var=0.)
    raw.add_proj(proj, remove_existing=True)
    raw.info.normalize_proj()
    raw.pick_channels(data_names + ref_names).crop(0, 2)
    long_names = ['123456789abcdefg' + name for name in raw.ch_names]
    fname = tmpdir.join('test-raw.fif')
    with catch_logging() as log:
        raw.save(fname)
    log = log.getvalue()
    assert 'truncated' not in log
    rename = dict(zip(raw.ch_names, long_names))
    long_data_names = [rename[name] for name in data_names]
    long_proj_names = long_data_names[:2]
    raw.rename_channels(rename)
    for comp in raw.info['comps']:
        for key in ('row_names', 'col_names'):
            for name in comp['data'][key]:
                assert name in raw.ch_names
    if raw.info['comps']:
        assert raw.compensation_grade == 0
        raw.apply_gradient_compensation(3)
        assert raw.compensation_grade == 3
    assert len(raw.info['projs']) == 1
    assert raw.info['projs'][0]['data']['col_names'] == long_proj_names
    raw.info['bads'] = bads = long_data_names[2:3]
    good_long_data_names = [
        name for name in long_data_names if name not in bads
    ]
    with catch_logging() as log:
        raw.save(fname, overwrite=True, verbose=True)
    log = log.getvalue()
    assert 'truncated to 15' in log
    for name in raw.ch_names:
        assert len(name) > 15
    # first read the full way
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'Reading extended channel information' in log
    for ra in (raw, raw_read):
        assert ra.ch_names == long_names
    assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names
    del raw_read
    # next read as if no longer names could be read
    monkeypatch.setattr(meas_info, '_read_extended_ch_info',
                        lambda x, y, z: None)
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'extended' not in log
    if raw.info['comps']:
        assert raw_read.compensation_grade == 3
        raw_read.apply_gradient_compensation(0)
        assert raw_read.compensation_grade == 0
    monkeypatch.setattr(  # restore
        meas_info, '_read_extended_ch_info', _read_extended_ch_info)
    short_proj_names = [
        f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'
        for ni, name in enumerate(long_data_names[:2])
    ]
    assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names
    #
    # epochs
    #
    epochs = Epochs(raw, make_fixed_length_events(raw))
    fname = tmpdir.join('test-epo.fif')
    epochs.save(fname)
    epochs_read = read_epochs(fname)
    for ep in (epochs, epochs_read):
        assert ep.info['ch_names'] == long_names
        assert ep.ch_names == long_names
    del raw, epochs_read
    # cov
    epochs.info['bads'] = []
    cov = compute_covariance(epochs, verbose='error')
    fname = tmpdir.join('test-cov.fif')
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    for co in (cov, cov_read):
        assert co['names'] == long_data_names
        assert co['bads'] == []
    del cov_read

    #
    # evoked
    #
    evoked = epochs.average()
    evoked.info['bads'] = bads
    assert evoked.nave == 1
    fname = tmpdir.join('test-ave.fif')
    evoked.save(fname)
    evoked_read = read_evokeds(fname)[0]
    for ev in (evoked, evoked_read):
        assert ev.ch_names == long_names
        assert ev.info['bads'] == bads
    del evoked_read, epochs

    #
    # forward
    #
    with pytest.warns(None):  # not enough points for CTF
        sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(
        pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))
    fwd = make_forward_solution(evoked.info, None, src, sphere)
    fname = tmpdir.join('temp-fwd.fif')
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    for fw in (fwd, fwd_read):
        assert fw['sol']['row_names'] == long_data_names
        assert fw['info']['ch_names'] == long_data_names
        assert fw['info']['bads'] == bads
    del fwd_read

    #
    # inv
    #
    inv = make_inverse_operator(evoked.info, fwd, cov)
    fname = tmpdir.join('test-inv.fif')
    write_inverse_operator(fname, inv)
    inv_read = read_inverse_operator(fname)
    for iv in (inv, inv_read):
        assert iv['info']['ch_names'] == good_long_data_names
    apply_inverse(evoked, inv)  # smoke test