def test_volume_source_space():
    """Test setting up volume source spaces."""
    fname_vol = op.join(data_path, 'subjects', 'sample', 'bem',
                        'volume-7mm-src.fif')
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    try:
        # The one in the sample dataset (uses bem as bounds)
        src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                            bem=fname_bem, mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode='approx')
        # round-trip through disk and compare again
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(['mne_volume_source_space', '--grid', '15.0',
                        '--src', temp_name, '--mri', fname_mri])
        src = read_source_spaces(temp_name)
        src_new = setup_volume_source_space('sample', temp_name, pos=15.0,
                                            mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode='approx')
    finally:
        # always clean up the temp file, even on failure
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_write_source_space():
    """Test writing and reading of source spaces."""
    src0 = read_source_spaces(fname, add_geom=False)
    src0_old = read_source_spaces(fname, add_geom=False)
    write_source_spaces(op.join(tempdir, 'tmp.fif'), src0)
    src1 = read_source_spaces(op.join(tempdir, 'tmp.fif'), add_geom=False)
    # Compare the re-read copy against both the freshly-read source space
    # and an untouched second read, to verify that writing did not mutate
    # ``src0``.  Bug fix: the inner loop previously iterated ``src0``
    # regardless of ``orig``, so the outer loop repeated the same
    # comparison twice and ``src0_old`` was never actually checked.
    for orig in [src0, src0_old]:
        for s0, s1 in zip(orig, src1):
            for name in ['nuse', 'dist_limit', 'ntri', 'np', 'type', 'id',
                         'subject_his_id']:
                assert_true(s0[name] == s1[name])
            for name in ['nn', 'rr', 'inuse', 'vertno', 'nuse_tri',
                         'coord_frame', 'use_tris', 'tris', 'nearest',
                         'nearest_dist']:
                assert_array_equal(s0[name], s1[name])
            for name in ['dist']:
                if s0[name] is not None:
                    assert_true(s1[name].shape == s0[name].shape)
                    # sparse difference must be exactly empty
                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
            for name in ['pinfo']:
                if s0[name] is not None:
                    assert_true(len(s0[name]) == len(s1[name]))
                    for p1, p2 in zip(s0[name], s1[name]):
                        assert_true(all(p1 == p2))
    # The above "if s0[name] is not None" can be removed once the sample
    # dataset is updated to have a source space with distance info
    for name in ['working_dir', 'command_line']:
        assert_true(src0.info[name] == src1.info[name])
def test_setup_source_space():
    """Test setting up ico, oct, and all source spaces."""
    fname_all = op.join(data_path, "subjects", "sample", "bem",
                        "sample-all-src.fif")
    fname_ico = op.join(data_path, "subjects", "fsaverage", "bem",
                        "fsaverage-ico-5-src.fif")
    # first let's exercise some invalid input params
    for bad_spacing in ("oct", "octo", "oct6e", "7emm", "alls"):
        assert_raises(ValueError, setup_source_space, "sample",
                      spacing=bad_spacing)
    assert_raises(IOError, setup_source_space, "sample", spacing="oct6",
                  subjects_dir=subjects_dir)
    # ico 5 (fsaverage) - write to temp file
    src = read_source_spaces(fname_ico)
    temp_name = op.join(tempdir, "temp-src.fif")
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        src_new = setup_source_space("fsaverage", temp_name, spacing="ico5",
                                     subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode="approx")
    # oct-6 (sample) - auto filename + IO
    src = read_source_spaces(fname)
    temp_name = op.join(tempdir, "temp-src.fif")
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        src_new = setup_source_space("sample", temp_name, spacing="oct6",
                                     subjects_dir=subjects_dir,
                                     overwrite=True)
    _compare_source_spaces(src, src_new, mode="approx")
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode="approx")
    # all source points - no file writing
    src = read_source_spaces(fname_all)
    src_new = setup_source_space("sample", None, spacing="all",
                                 subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode="approx")
def test_add_source_space_distances_limited():
    """Test adding distances to source space with a dist_limit."""
    tempdir = _TempDir()
    src = read_source_spaces(fname)
    src_new = read_source_spaces(fname)
    del src_new[0]['dist']
    del src_new[1]['dist']
    # restrict the vertex count to keep this test fast
    n_do = 200
    src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
    src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
    out_name = op.join(tempdir, 'temp-src.fif')
    try:
        add_source_space_distances(src_new, dist_limit=0.007)
    except RuntimeError:  # what we throw when scipy version is wrong
        raise SkipTest('dist_limit requires scipy > 0.13')
    write_source_spaces(out_name, src_new)
    src_new = read_source_spaces(out_name)
    for so, sn in zip(src, src_new):
        # the stored limits differ in sign between the two pipelines
        assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
        assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
        do = so['dist']
        dn = sn['dist']
        # clean out distances > 0.007 in C code
        do.data[do.data > 0.007] = 0
        do.eliminate_zeros()
        # make sure we have some comparable distances
        assert np.sum(do.data < 0.007) > 400
        # do the comparison over the region that was computed
        d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
        assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
def test_discrete_source_space():
    """Test setting up (and reading/writing) discrete source spaces."""
    src = read_source_spaces(fname)
    v = src[0]["vertno"]
    # make a discrete version with the C code, and with ours
    temp_name = op.join(tempdir, "temp-src.fif")
    try:
        # save positions + normals in the format the C tool expects
        temp_pos = op.join(tempdir, "temp-pos.txt")
        np.savetxt(temp_pos, np.c_[src[0]["rr"][v], src[0]["nn"][v]])
        # spherical source space (no bem or surf supplied)
        run_subprocess(["mne_volume_source_space", "--meters", "--pos",
                        temp_pos, "--src", temp_name])
        src_c = read_source_spaces(temp_name)
        pos_dict = dict(rr=src[0]["rr"][v], nn=src[0]["nn"][v])
        src_new = setup_volume_source_space("sample", None, pos=pos_dict,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src_c, src_new, mode="approx")
        assert_allclose(src[0]["rr"][v], src_new[0]["rr"],
                        rtol=1e-3, atol=1e-6)
        assert_allclose(src[0]["nn"][v], src_new[0]["nn"],
                        rtol=1e-3, atol=1e-6)
        # now check the writing path
        write_source_spaces(temp_name, src_c)
        src_c2 = read_source_spaces(temp_name)
        _compare_source_spaces(src_c, src_c2)
        # combining pos dict with an MRI must raise
        assert_raises(ValueError, setup_volume_source_space, "sample",
                      pos=pos_dict, mri=fname_mri)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_stc_to_label():
    """Test stc_to_label."""
    src = read_source_spaces(src_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = stc_to_label(stc, src='sample', smooth=3)
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels2 = stc_to_label(stc, src=src, smooth=3)
    assert_true(len(w) == 1)
    # subject-name path and SourceSpaces path must agree
    assert_true(len(labels1) == len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=3,
                                            connected=True)
    assert_true(len(w) == 1)
    # connected=True requires a SourceSpaces, and a good one at that
    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=3,
                  connected=True)
    assert_raises(RuntimeError, stc_to_label, stc, src=src_bad,
                  connected=True)
    assert_true(len(labels_lh) == 1)
    assert_true(len(labels_rh) == 1)
def test_add_patch_info(monkeypatch):
    """Test adding patch info to source space."""
    # set up a small source space with the patch info stripped out
    src = read_source_spaces(fname_small)
    src_new = read_source_spaces(fname_small)
    for s in src_new:
        s['nearest'] = None
        s['nearest_dist'] = None
        s['pinfo'] = None
    # no patch info should be added for a tiny dist_limit
    add_source_space_distances(src_new, dist_limit=0.00001)
    assert all(s['nearest'] is None for s in src_new)
    assert all(s['nearest_dist'] is None for s in src_new)
    assert all(s['pinfo'] is None for s in src_new)
    # now use one that works (and test our warning-throwing)
    monkeypatch.setattr(mne.source_space, '_DIST_WARN_LIMIT', 1)
    with pytest.warns(RuntimeWarning, match='Computing distances for 258'):
        add_source_space_distances(src_new)
    # recomputed patch info must match what was stored in the file
    for s1, s2 in zip(src, src_new):
        assert_array_equal(s1['nearest'], s2['nearest'])
        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
            assert_array_equal(p1, p2)
def test_add_patch_info():
    """Test adding patch info to source space."""
    # set up a small source space with the patch info stripped out
    src = read_source_spaces(fname_small)
    src_new = read_source_spaces(fname_small)
    for s in src_new:
        s['nearest'] = None
        s['nearest_dist'] = None
        s['pinfo'] = None
    # no patch info should be added for a tiny dist_limit
    try:
        add_source_space_distances(src_new, dist_limit=0.00001)
    except RuntimeError:  # what we throw when scipy version is wrong
        pass
    else:
        assert all(s['nearest'] is None for s in src_new)
        assert all(s['nearest_dist'] is None for s in src_new)
        assert all(s['pinfo'] is None for s in src_new)
    # now let's use one that works
    add_source_space_distances(src_new)
    # recomputed patch info must match what was stored in the file
    for s1, s2 in zip(src, src_new):
        assert_array_equal(s1['nearest'], s2['nearest'])
        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
            assert_array_equal(p1, p2)
def test_morph_source_spaces():
    """Test morphing of source spaces."""
    src_fs = read_source_spaces(fname_fs)
    src_expected = read_source_spaces(fname_morph)
    # morph with our Python implementation and compare to the stored result
    src_actual = morph_source_spaces(src_fs, 'sample',
                                     subjects_dir=subjects_dir)
    _compare_source_spaces(src_expected, src_actual, mode='approx')
def test_volume_source_space():
    """Test setting up volume source spaces."""
    fname_vol = op.join(data_path, "subjects", "sample", "bem",
                        "volume-7mm-src.fif")
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, "temp-src.fif")
    try:
        # the one in the sample dataset (uses bem as bounds)
        src_new = setup_volume_source_space(
            "sample", temp_name, pos=7.0, bem=fname_bem, mri=fname_mri,
            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode="approx")
        # round-trip through disk
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode="approx")
        # the spherical one (no bem or surf supplied)
        run_subprocess(["mne_volume_source_space", "--grid", "15.0",
                        "--src", temp_name, "--mri", fname_mri])
        src = read_source_spaces(temp_name)
        src_new = setup_volume_source_space("sample", temp_name, pos=15.0,
                                            mri=fname_mri,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src, src_new, mode="approx")
        # without the MRI argument, reading the result should raise
        run_subprocess(["mne_volume_source_space", "--grid", "15.0",
                        "--src", temp_name])
        assert_raises(ValueError, read_source_spaces, temp_name)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def test_write_source_space():
    """Test writing and reading of source spaces."""
    out_name = op.join(tempdir, 'tmp.fif')
    src0 = read_source_spaces(fname, add_geom=False)
    # write then read back, and check the round trip is lossless
    write_source_spaces(out_name, src0)
    src1 = read_source_spaces(out_name, add_geom=False)
    _compare_source_spaces(src0, src1)
def test_discrete_source_space():
    """Test setting up (and reading/writing) discrete source spaces."""
    src = read_source_spaces(fname)
    v = src[0]['vertno']
    # make a discrete version with the C code, and with ours
    temp_name = op.join(tempdir, 'temp-src.fif')
    try:
        # save positions + normals in the format the C tool expects
        temp_pos = op.join(tempdir, 'temp-pos.txt')
        np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
        # spherical source space (no bem or surf supplied)
        run_subprocess(['mne_volume_source_space', '--meters', '--pos',
                        temp_pos, '--src', temp_name])
        src_c = read_source_spaces(temp_name)
        src_new = setup_volume_source_space(
            'sample', None,
            pos=dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v]),
            subjects_dir=subjects_dir)
        _compare_source_spaces(src_c, src_new, mode='approx')
        assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
                        rtol=1e-3, atol=1e-6)
        assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
                        rtol=1e-3, atol=1e-6)
        # now check the writing path
        write_source_spaces(temp_name, src_c)
        src_c2 = read_source_spaces(temp_name)
        _compare_source_spaces(src_c, src_c2)
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)
def create_src_space(sbj_dir, sbj_id, spacing, is_blind):
    """Read (or create with mne-python) the source space for a subject.

    Parameters
    ----------
    sbj_dir : str
        FreeSurfer subjects directory.
    sbj_id : str
        Subject identifier (directory name under ``sbj_dir``).
    spacing : str
        Spacing tag used in the source-space filename (e.g. ``'ico-4'``).
    is_blind : bool
        If True, a precomputed "blind" source space (with some labels
        removed) is required; it is only read, never created here.

    Returns
    -------
    src : the source space returned by ``mne.read_source_spaces`` or
        ``mne.setup_source_space``.
    """
    import os.path as op
    import mne

    bem_dir = op.join(sbj_dir, sbj_id, 'bem')

    # check if source space exists, if not create it using the mne-python
    # function; we have to create the cortical surface source space even
    # when aseg is True
    if is_blind:
        # if is_blind we have to use a precomputed source space since we
        # had to remove some labels
        src_fname = op.join(bem_dir,
                            '%s-blind-%s-src.fif' % (sbj_id, spacing))
        if not op.isfile(src_fname):
            # bug fix: the original did ``raise '<string>'`` -- raising a
            # string is a TypeError (string exceptions are not allowed);
            # raise a real exception instead
            raise IOError('\n *** you have to compute the source space '
                          'blind!!! ***\n')
        print('\n*** source space file %s exists!!!\n' % src_fname)
        src = mne.read_source_spaces(src_fname)
    else:
        src_fname = op.join(bem_dir, '%s-%s-src.fif' % (sbj_id, spacing))
        if not op.isfile(src_fname):
            src = mne.setup_source_space(sbj_id, subjects_dir=sbj_dir,
                                         fname=True,
                                         spacing=spacing.replace('-', ''),
                                         add_dist=False, overwrite=True,
                                         n_jobs=2)
            print('\n*** source space file %s written ***\n' % src_fname)
        else:
            print('\n*** source space file %s exists!!!\n' % src_fname)
            src = mne.read_source_spaces(src_fname)
    return src
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
    surf['rr'] *= 1e3  # convert to mm
    # the one in the testing dataset (uses bem as bounds); run once with a
    # BEM file and once with the equivalent in-memory surface
    for bem, surf in zip((fname_bem, None), (None, surf)):
        src_new = setup_volume_source_space(
            'sample', pos=7.0, bem=bem, surface=surf, mri='T1.mgz',
            subjects_dir=subjects_dir)
        write_source_spaces(temp_name, src_new, overwrite=True)
        src[0]['subject_his_id'] = 'sample'  # XXX: to make comparison pass
        _compare_source_spaces(src, src_new, mode='approx')
        del src_new
        src_new = read_source_spaces(temp_name)
        _compare_source_spaces(src, src_new, mode='approx')
    # a bad surface argument must raise
    pytest.raises(IOError, setup_volume_source_space, 'sample', pos=7.0,
                  bem=None, surface='foo',  # bad surf
                  mri=fname_mri, subjects_dir=subjects_dir)
    assert repr(src) == repr(src_new)
    assert src.kind == 'volume'
    # Spheres
    sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
                               relative_radii=(0.9, 1.0),
                               sigmas=(0.33, 1.0))
    src = setup_volume_source_space(pos=10)
    src_new = setup_volume_source_space(pos=10, sphere=sphere)
    _compare_source_spaces(src, src_new, mode='exact')
    pytest.raises(ValueError, setup_volume_source_space, sphere='foo')
    # Need a radius
    sphere = make_sphere_model(head_radius=None)
    pytest.raises(ValueError, setup_volume_source_space, sphere=sphere)
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')

    # create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)

    # morph the data over using standard methods
    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
                             smooth=1, subjects_dir=subjects_dir)

    # We can now pretend like this was real data we got e.g. from an
    # inverse. To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # compare to the original data
    stc_morph_morph = stc_morph.morph('fsaverage',
                                      stc_morph_return.vertices, smooth=1,
                                      subjects_dir=subjects_dir)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert_true(corr > 0.99, corr)

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    assert_raises(RuntimeError, stc_morph.to_original_src, src,
                  subjects_dir=subjects_dir)
def test_stc_to_label():
    """Test stc_to_label."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    # subject-name path and SourceSpaces path must agree
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)
    assert_true(len(w) > 0)
    # connected=True requires a (good) SourceSpaces instance
    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'],
                                 vertices=stc.vertices[0])
    assert_raises(ValueError, spatial_tris_connectivity, tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert_true(connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert_true(len(labels_lh) > 1)
    assert_true(len(labels_rh) > 1)

    # with smooth='patch'
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert_equal(len(w), 1)
    assert_equal(len(labels_patch), len(labels1))
    # bug fix: the original loop re-compared labels1 vs labels2 here, so
    # labels_patch was never actually tested
    for l1, l2 in zip(labels_patch, labels1):
        assert_labels_equal(l1, l2, decimal=4)
def test_setup_source_space():
    """Test setting up ico, oct, and all source spaces."""
    tempdir = _TempDir()
    fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                        'fsaverage-ico-5-src.fif')
    # first let's exercise some invalid input params
    for bad_spacing in ('oct', 'octo', 'oct6e', '7emm', 'alls'):
        assert_raises(ValueError, setup_source_space, 'sample',
                      spacing=bad_spacing, add_dist=False)
    assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
                  subjects_dir=subjects_dir, add_dist=False)

    # ico 5 (fsaverage) - write to temp file
    src = read_source_spaces(fname_ico)
    temp_name = op.join(tempdir, 'temp-src.fif')
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        warnings.simplefilter('always')
        src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
                                     subjects_dir=subjects_dir,
                                     add_dist=False, overwrite=True)
    _compare_source_spaces(src, src_new, mode='approx')
    assert_equal(repr(src), repr(src_new))
    assert_equal(repr(src).count('surface ('), 2)
    # ico5 uses every vertex of both decimated hemispheres
    assert_array_equal(src[0]['vertno'], np.arange(10242))
    assert_array_equal(src[1]['vertno'], np.arange(10242))

    # oct-6 (sample) - auto filename + IO
    src = read_source_spaces(fname)
    temp_name = op.join(tempdir, 'temp-src.fif')
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        warnings.simplefilter('always')
        src_new = setup_source_space('sample', temp_name, spacing='oct6',
                                     subjects_dir=subjects_dir,
                                     overwrite=True, add_dist=False)
    _compare_source_spaces(src, src_new, mode='approx', nearest=False)
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode='approx', nearest=False)

    # all source points - no file writing
    src_new = setup_source_space('sample', None, spacing='all',
                                 subjects_dir=subjects_dir, add_dist=False)
    assert_true(src_new[0]['nuse'] == len(src_new[0]['rr']))
    assert_true(src_new[1]['nuse'] == len(src_new[1]['rr']))

    # dense source space to hit surf['inuse'] lines of _create_surf_spacing
    assert_raises(RuntimeError, setup_source_space, 'sample', None,
                  spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
def test_stc_to_label():
    """Test stc_to_label."""
    src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    # subject-name path and SourceSpaces path must agree
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)
    # connected=True requires a (good) SourceSpaces instance
    pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'],
                                 vertices=stc.vertices[0])
    pytest.raises(ValueError, spatial_tris_connectivity, tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert (connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert (len(labels_lh) > 1)
    assert (len(labels_rh) > 1)

    # with smooth='patch'
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert len(labels_patch) == len(labels1)
    # bug fix: the original loop re-compared labels1 vs labels2 here, so
    # labels_patch was never actually tested
    for l1, l2 in zip(labels_patch, labels1):
        assert_labels_equal(l1, l2, decimal=4)
def test_volume_source_space():
    """Test setting up volume source spaces."""
    tempdir = _TempDir()
    src = read_source_spaces(fname_vol)
    temp_name = op.join(tempdir, 'temp-src.fif')
    # the one in the sample dataset (uses bem as bounds)
    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                        bem=fname_bem, mri=fname_mri,
                                        subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode='approx')
    del src_new
    # re-read what was written to disk and compare again
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode='approx')
def test_write_source_space():
    """Test reading and writing of source spaces."""
    tempdir = _TempDir()
    out_name = op.join(tempdir, 'tmp-src.fif')
    src0 = read_source_spaces(fname, patch_stats=False)
    # write then read back, and check the round trip is lossless
    write_source_spaces(out_name, src0)
    src1 = read_source_spaces(out_name, patch_stats=False)
    _compare_source_spaces(src0, src1)
    # test warnings on bad filenames
    src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-src.fif'):
        write_source_spaces(src_badname, src0)
    with pytest.warns(RuntimeWarning, match='-src.fif'):
        read_source_spaces(src_badname)
def test_setup_source_space():
    """Test setting up ico, oct, and all source spaces."""
    fname_all = op.join(data_path, 'subjects', 'sample', 'bem',
                        'sample-all-src.fif')
    fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                        'fsaverage-ico-5-src.fif')
    # first let's exercise some invalid input params
    for bad_spacing in ('oct', 'octo', 'oct6e', '7emm', 'alls'):
        assert_raises(ValueError, setup_source_space, 'sample',
                      spacing=bad_spacing, add_dist=False)
    assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
                  subjects_dir=subjects_dir, add_dist=False)
    # ico 5 (fsaverage) - write to temp file
    src = read_source_spaces(fname_ico)
    temp_name = op.join(tempdir, 'temp-src.fif')
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        warnings.simplefilter('always')
        src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
                                     subjects_dir=subjects_dir,
                                     add_dist=False, overwrite=True)
    _compare_source_spaces(src, src_new, mode='approx')
    # oct-6 (sample) - auto filename + IO
    src = read_source_spaces(fname)
    temp_name = op.join(tempdir, 'temp-src.fif')
    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
        warnings.simplefilter('always')
        src_new = setup_source_space('sample', temp_name, spacing='oct6',
                                     subjects_dir=subjects_dir,
                                     overwrite=True, add_dist=False)
    _compare_source_spaces(src, src_new, mode='approx')
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode='approx')
    # all source points - no file writing
    src = read_source_spaces(fname_all)
    src_new = setup_source_space('sample', None, spacing='all',
                                 subjects_dir=subjects_dir, add_dist=False)
    _compare_source_spaces(src, src_new, mode='approx')
def test_source_space_from_label():
    """Test generating a source space from volume label."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    # bug fix: seed the RNG so any failure is reproducible (the original
    # used np.random.rand() and picked a different label on every run)
    rng = np.random.RandomState(0)
    volume_label = label_names[rng.randint(len(label_names))]

    # pos as a dict is incompatible with volume_label
    pos = dict()
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
                  volume_label=volume_label, mri=aseg_fname)

    # no mri provided
    pytest.raises(RuntimeError, setup_volume_source_space, 'sample',
                  mri=None, volume_label=volume_label)

    # invalid volume label
    pytest.raises(ValueError, setup_volume_source_space, 'sample',
                  volume_label='Hello World!', mri=aseg_fname)

    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_label,
                                    mri=aseg_fname, add_interpolator=False)
    assert_equal(volume_label, src[0]['seg_name'])

    # test reading and writing
    out_name = op.join(tempdir, 'temp-src.fif')
    write_source_spaces(out_name, src)
    src_from_file = read_source_spaces(out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
def test_spatial_inter_hemi_connectivity():
    """Test spatial connectivity between hemispheres."""
    # trivial cases: tiny distance -> no connections, huge -> all of them
    conn = spatial_inter_hemi_connectivity(fname_src_3, 5e-6)
    assert_equal(conn.data.size, 0)
    conn = spatial_inter_hemi_connectivity(fname_src_3, 5e6)
    assert_equal(conn.data.size, np.prod(conn.shape) // 2)
    # actually interesting case (1cm), should be between 2 and 10% of verts
    src = read_source_spaces(fname_src_3)
    conn = spatial_inter_hemi_connectivity(src, 10e-3)
    conn = conn.tocsr()
    n_src = conn.shape[0]
    assert_true(n_src * 0.02 < conn.data.size < n_src * 0.10)
    # within-hemisphere blocks must be empty
    assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
    assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
    # the matrix must be symmetric
    c = (conn.T + conn) / 2. - conn
    c.eliminate_zeros()
    assert_equal(c.data.size, 0)
    # check locations
    upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
    assert_equal(upper_right.sum(), conn.sum() // 2)
    good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
                   'G_cuneus']
    for hi, hemi in enumerate(('lh', 'rh')):
        has_neighbors = src[hi]['vertno'][
            np.where(np.any(upper_right, axis=1 - hi))[0]]
        labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
                                        subjects_dir=subjects_dir)
        use_labels = [l.name[:-3] for l in labels
                      if np.in1d(l.vertices, has_neighbors).any()]
        # only midline labels should have cross-hemisphere neighbors
        assert_true(set(use_labels) - set(good_labels) == set())
def test_volume_stc():
    """Test volume STCs."""
    N = 100
    data = np.arange(N)[:, np.newaxis]
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = VolSourceEstimate(data, vertno, 0, 1)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        # write/read twice to check round-trip stability
        # bug fix: use range() -- xrange is Python 2-only
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(vertno_read, stc_new.vertno)
            assert_array_almost_equal(stc.data, stc_new.data)
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(isinstance(stc_new, VolSourceEstimate))
        assert_array_equal(stc.vertno, stc_new.vertno)
        assert_array_almost_equal(stc.data, stc_new.data)
    # save the stc as a nifti file and export
    try:
        import nibabel as nib
        src = read_source_spaces(fname_vsrc)
        vol_fname = op.join(tempdir, 'stc.nii.gz')
        stc.save_as_volume(vol_fname, src, dest='surf',
                           mri_resolution=False)
        img = nib.load(vol_fname)
        assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
        t1_img = nib.load(fname_t1)
        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src, dest='mri',
                           mri_resolution=True)
        img = nib.load(vol_fname)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
        # export without saving
        img = stc.as_volume(src, dest='mri', mri_resolution=True)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
    except ImportError:
        # bug fix: print() function -- the Py2 print statement is a
        # SyntaxError on Python 3
        print('Save as nifti test skipped, needs NiBabel')
def test_write_source_space():
    """Test writing and reading of source spaces."""
    out_name = op.join(tempdir, 'tmp-src.fif')
    src0 = read_source_spaces(fname, add_geom=False)
    # write then read back, and check the round trip is lossless
    write_source_spaces(out_name, src0)
    src1 = read_source_spaces(out_name, add_geom=False)
    _compare_source_spaces(src0, src1)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_source_spaces(src_badname, src0)
        read_source_spaces(src_badname)
    print([ww.message for ww in w])
    # one warning each for the write and the read
    assert_equal(len(w), 2)
def test_other_volume_source_spaces():
    """Test setting up other volume source spaces."""
    # these are split off because they require the MNE tools, and
    # Travis doesn't seem to like them
    # let's try the spherical one (no bem or surf supplied)
    tempdir = _TempDir()
    temp_name = op.join(tempdir, 'temp-src.fif')
    run_subprocess(['mne_volume_source_space', '--grid', '7.0',
                    '--src', temp_name, '--mri', fname_mri])
    src = read_source_spaces(temp_name)
    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
                                        mri=fname_mri,
                                        subjects_dir=subjects_dir)
    _compare_source_spaces(src, src_new, mode='approx')
    del src
    del src_new
    # a malformed sphere argument must raise
    assert_raises(ValueError, setup_volume_source_space, 'sample',
                  temp_name, pos=7.0, sphere=[1., 1.],  # bad sphere
                  mri=fname_mri, subjects_dir=subjects_dir)
    # without the MRI argument, reading the result should give an error
    run_subprocess(['mne_volume_source_space', '--grid', '7.0',
                    '--src', temp_name])
    assert_raises(ValueError, read_source_spaces, temp_name)
def apply_rois(fn_stc, tmin, tmax, thr, min_subject='fsaverage'):
    """Threshold an averaged STC and save its connected ROIs as labels.

    Parameters
    ----------
    fn_stc : str
        Path to the source estimate to read.
    tmin, tmax : float
        Crop window applied before thresholding.
    thr : float
        Percentile (0-100); vertices whose power falls below this
        percentile are zeroed before label extraction.
    min_subject : str
        Subject whose ico-5 source space is used for label extraction
        (default 'fsaverage').
    """
    stc_avg = mne.read_source_estimate(fn_stc)
    stc_avg = stc_avg.crop(tmin, tmax)
    # Zero out weak vertices: keep only those whose per-vertex power
    # (sum of squares over time) exceeds the thr percentile.
    src_pow = np.sum(stc_avg.data ** 2, axis=1)
    stc_avg.data[src_pow < np.percentile(src_pow, thr)] = 0.
    fn_src = subjects_dir + '/%s/bem/fsaverage-ico-5-src.fif' % min_subject
    src_inv = mne.read_source_spaces(fn_src)
    func_labels_lh, func_labels_rh = mne.stc_to_label(
        stc_avg, src=src_inv, smooth=True,
        subjects_dir=subjects_dir, connected=True)
    # Labels go into '<stem>/ini' next to the STC file (fresh directory).
    labels_path = fn_stc[:fn_stc.rfind('-')] + '/ini'
    reset_directory(labels_path)
    # NOTE(review): both hemispheres are written as 'ROI_<n>', so right-
    # hemisphere labels overwrite same-index left-hemisphere files; kept
    # as-is to preserve existing output names -- confirm intent.
    # Left hemisphere definition
    for i, func_label in enumerate(func_labels_lh):
        func_label.save(labels_path + '/ROI_%d' % i)
    # right hemisphere definition
    for j, func_label in enumerate(func_labels_rh):
        func_label.save(labels_path + '/ROI_%d' % j)
def test_read_volume_from_src():
    """Test that volume labels survive inside a mixed source space."""
    aseg_path = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    requested_labels = ['Left-Amygdala', 'Brain-Stem', 'Right-Amygdala']

    # Build the mixed source space: surface part from disk, volume part
    # restricted to the requested aseg labels.
    mixed_src = read_source_spaces(fname)
    mixed_src += setup_volume_source_space(
        'sample', mri=aseg_path, pos=5.0, bem=fname_bem,
        volume_label=requested_labels, subjects_dir=subjects_dir)

    # Recover the label of the first volume source and normalize its name.
    labels_from_src = get_volume_labels_from_src(
        mixed_src, 'sample', subjects_dir)
    recovered_name = 'Left-' + labels_from_src[0].name.replace('-lh', '')

    # The first volume space follows the surface spaces at index 2.
    assert_equal(recovered_name, mixed_src[2]['seg_name'])
    assert_equal(mixed_src[2]['type'], 'vol')
def _mne_source_space(subject, src_tag, subjects_dir):
    """Load an mne source space, creating and caching it if missing.

    Parameters
    ----------
    subject : str
        Subject name.
    src_tag : str
        Source-space kind and spacing, e.g. 'ico-4' or 'vol-10'.
    subjects_dir : str
        Path to the FreeSurfer subjects directory.

    Returns
    -------
    src : SourceSpaces
        Read from disk when the file already exists; otherwise newly set
        up (surface for 'ico', volume for 'vol') and written to disk.
    """
    src_file = os.path.join(subjects_dir, subject, 'bem',
                            '%s-%s-src.fif' % (subject, src_tag))
    src, spacing = src_tag.split('-')
    if os.path.exists(src_file):
        return mne.read_source_spaces(src_file, False)
    elif src == 'ico':
        ss = mne.setup_source_space(subject, spacing=src + spacing,
                                    subjects_dir=subjects_dir,
                                    add_dist=True)
    elif src == 'vol':
        mri_file = os.path.join(subjects_dir, subject, 'mri', 'orig.mgz')
        # NOTE(review): BEM filename is hard-coded to 'sample' regardless
        # of `subject` -- confirm this is intended for other subjects.
        bem_file = os.path.join(subjects_dir, subject, 'bem',
                                'sample-5120-5120-5120-bem-sol.fif')
        ss = mne.setup_volume_source_space(
            subject, pos=float(spacing), mri=mri_file, bem=bem_file,
            mindist=0., exclude=0., subjects_dir=subjects_dir)
    else:
        raise ValueError("src_tag=%s" % repr(src_tag))
    # cache for the next call
    mne.write_source_spaces(src_file, ss)
    return ss
def test_label_in_src():
    """Test filling a Label restricted to source-space vertices.

    Checks that ``Label.fill(src)`` expands a sparse label via the source
    space's 'nearest' mapping, both for vertices and values.
    """
    src = read_source_spaces(src_fname)
    label = read_label(v1_label_fname)

    # construct label from source space vertices: keep only label vertices
    # that actually exist in the left-hemisphere source space
    vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
    where = in1d(label.vertices, vert_in_src)
    pos_in_src = label.pos[where]
    values_in_src = label.values[where]
    label_src = Label(vert_in_src, pos_in_src, values_in_src,
                      hemi='lh').fill(src)

    # check label vertices: every surface vertex whose nearest source
    # vertex lies in the label must be included, and no others
    vertices_status = in1d(src[0]['nearest'], label.vertices)
    vertices_in = np.nonzero(vertices_status)[0]
    vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
    assert_array_equal(label_src.vertices, vertices_in)
    assert_array_equal(in1d(vertices_out, label_src.vertices), False)

    # check values: each filled vertex must inherit the value of its
    # nearest source-space vertex
    value_idx = digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
    assert_array_equal(label_src.values, values_in_src[value_idx])

    # test exception: a negative vertex id must be rejected
    vertices = np.append([-1], vert_in_src)
    assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
def test_stc_to_label():
    """Test stc_to_label."""
    # read fixtures; warnings are recorded because reading/label
    # conversion may emit them on this dataset
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src = read_source_spaces(fwd_fname)
        src_bad = read_source_spaces(src_bad_fname)
        stc = read_source_estimate(stc_fname, 'sample')
        os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
        # private vs private path with an explicit src must agree
        labels1 = _stc_to_label(stc, src='sample', smooth=3)
        labels2 = _stc_to_label(stc, src=src, smooth=3)
        assert_equal(len(labels1), len(labels2))
        for l1, l2 in zip(labels1, labels2):
            assert_labels_equal(l1, l2, decimal=4)

    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)

    assert_true(len(w) > 0)
    # invalid argument combinations
    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'],
                                 vertices=stc.vertices[0])
    assert_raises(ValueError, spatial_tris_connectivity, tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert_true(connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert_true(len(labels_lh) > 1)
    assert_true(len(labels_rh) > 1)

    # with smooth='patch'
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert_equal(len(w), 1)
    assert_equal(len(labels_patch), len(labels1))
    # NOTE(review): this re-compares labels1 vs labels2 (already checked
    # above); it likely was meant to compare labels_patch vs labels1 --
    # confirm before changing, as that could alter the test outcome.
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)

    # Morph the data over using standard methods
    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
                             smooth=1, subjects_dir=subjects_dir)

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # Compare to the original data: morph back the standard way as reference
    stc_morph_morph = stc_morph.morph('fsaverage',
                                      stc_morph_return.vertices, smooth=1,
                                      subjects_dir=subjects_dir)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert_true(corr > 0.99, corr)

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    assert_raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    assert_raises(RuntimeError, stc_morph.to_original_src, src,
                  subjects_dir=subjects_dir)
def _fwd_surf(_evoked_cov_sphere):
    """Compute the forward solution for a surface source space."""
    evoked, _cov, sphere = _evoked_cov_sphere
    surf_src = mne.read_source_spaces(fname_src)
    fwd = mne.make_forward_solution(evoked.info, fname_trans, surf_src,
                                    sphere, mindist=5.0)
    return fwd
residual.plot(ylim=ylim, proj=True, time_unit='s') ############################################################################### # Generate stc from dipoles stc = make_stc_from_dipoles(dipoles, forward['src']) ############################################################################### # View in 2D and 3D ("glass" brain like 3D plot) solver = "MxNE" if n_mxne_iter == 1 else "irMxNE" plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1), fig_name="%s (cond %s)" % (solver, condition), opacity=0.1) ############################################################################### # Morph onto fsaverage brain and view morph = mne.compute_source_morph(stc, subject_from='sample', subject_to='fsaverage', spacing=None, sparse=True, subjects_dir=subjects_dir) stc_fsaverage = morph.apply(stc) src_fsaverage_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif' src_fsaverage = mne.read_source_spaces(src_fsaverage_fname) plot_sparse_source_estimates(src_fsaverage, stc_fsaverage, bgcolor=(1, 1, 1), fig_name="Morphed %s (cond %s)" % (solver, condition), opacity=0.1) ############################################################################### # References # ---------- # .. footbibliography::
def test_stc_to_label():
    """Test stc_to_label."""
    src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    # private path with subject name vs explicit src must agree
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)

    # invalid argument combinations
    pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'],
                                 vertices=stc.vertices[0])
    pytest.raises(ValueError, spatial_tris_adjacency, tris,
                  remap_vertices=False)
    adjacency = spatial_tris_adjacency(tris, remap_vertices=True)
    assert (adjacency.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert (len(labels_lh) > 1)
    assert (len(labels_rh) > 1)

    # with smooth='patch'
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert len(labels_patch) == len(labels1)
    # NOTE(review): this re-compares labels1 vs labels2 (already checked
    # above); it likely was meant to compare labels_patch vs labels1 --
    # confirm before changing, as that could alter the test outcome.
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
ax.plot(x_line + x, gamma_power[i] + y, linewidth=0.5, color=color) # %% # We can project gamma power from the sensor data to the nearest locations on # the pial surface and visualize that: # # As shown in the plot, the epileptiform activity starts in the temporal lobe, # progressing posteriorly. The seizure becomes generalized eventually, after # this example short time section. This dataset is available using # :func:`mne.datasets.epilepsy_ecog.data_path` for you to examine. # sphinx_gallery_thumbnail_number = 3 xyz_pts = np.array([dig['r'] for dig in evoked.info['dig']]) src = mne.read_source_spaces(subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-ico-5-src.fif') stc = mne.stc_near_sensors(gamma_power_t, trans='fsaverage', subject='fsaverage', subjects_dir=subjects_dir, src=src, surface='pial', mode='nearest', distance=0.02) vmin, vmid, vmax = np.percentile(gamma_power_t.data, [10, 25, 90]) clim = dict(kind='value', lims=[vmin, vmid, vmax]) brain = stc.plot(surface='pial', hemi='rh', colormap='inferno', colorbar=False, clim=clim,
def test_volume_source_morph_round_trip(tmpdir, subject_from, subject_to,
                                        lower, upper, dtype, morph_mat,
                                        monkeypatch):
    """Test volume source estimate morph round-trips well.

    Morphs identity-like data subject_from -> subject_to -> subject_from
    and checks localization error, power preservation, and (optionally)
    the sparse vol_morph_mat path.
    """
    import nibabel as nib
    from nibabel.processing import resample_from_to
    src = dict()
    if morph_mat:
        # ~1.5 minutes with pos=7. (4157 morphs!) for sample, so only test
        # morph_mat computation mode with a few labels
        label_names = sorted(get_volume_labels_from_aseg(fname_aseg))[1:2]
        if 'sample' in (subject_from, subject_to):
            src['sample'] = setup_volume_source_space(
                'sample', subjects_dir=subjects_dir,
                volume_label=label_names, mri=fname_aseg)
            assert sum(s['nuse'] for s in src['sample']) == 12
        if 'fsaverage' in (subject_from, subject_to):
            src['fsaverage'] = setup_volume_source_space(
                'fsaverage', subjects_dir=subjects_dir,
                volume_label=label_names[:3], mri=fname_aseg_fs)
            assert sum(s['nuse'] for s in src['fsaverage']) == 16
    else:
        assert not morph_mat
        if 'sample' in (subject_from, subject_to):
            src['sample'] = mne.read_source_spaces(fname_vol)
            src['sample'][0]['subject_his_id'] = 'sample'
            assert src['sample'][0]['nuse'] == 4157
        if 'fsaverage' in (subject_from, subject_to):
            # Created to save space with:
            #
            # bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage',
            #               'fsaverage-inner_skull-bem.fif')
            # src_fsaverage = mne.setup_volume_source_space(
            #     'fsaverage', pos=7., bem=bem, mindist=0,
            #     subjects_dir=subjects_dir, add_interpolator=False)
            # mne.write_source_spaces(fname_fs_vol, src_fsaverage,
            #                         overwrite=True)
            #
            # For speed we do it without the interpolator because it's huge.
            src['fsaverage'] = mne.read_source_spaces(fname_fs_vol)
            src['fsaverage'][0].update(
                vol_dims=np.array([23, 29, 25]), seg_name='brain')
            _add_interpolator(src['fsaverage'])
            assert src['fsaverage'][0]['nuse'] == 6379
    src_to, src_from = src[subject_to], src[subject_from]
    del src
    # No SDR just for speed once everything works
    kwargs = dict(niter_sdr=(), niter_affine=(1, ),
                  subjects_dir=subjects_dir, verbose=True)
    morph_from_to = compute_source_morph(
        src=src_from, src_to=src_to, subject_to=subject_to, **kwargs)
    morph_to_from = compute_source_morph(
        src=src_to, src_to=src_from, subject_to=subject_from, **kwargs)
    nuse = sum(s['nuse'] for s in src_from)
    assert nuse > 10
    # 10 delta-function sources spread evenly over the source space
    use = np.linspace(0, nuse - 1, 10).round().astype(int)
    data = np.eye(nuse)[:, use]
    if dtype is complex:
        data = data * 1j
    vertices = [s['vertno'] for s in src_from]
    stc_from = VolSourceEstimate(data, vertices, 0, 1)
    with catch_logging() as log:
        stc_from_rt = morph_to_from.apply(
            morph_from_to.apply(stc_from, verbose='debug'))
    log = log.getvalue()
    assert 'individual volume morph' in log
    # localization error (mm) of each delta source after the round trip
    maxs = np.argmax(stc_from_rt.data, axis=0)
    src_rr = np.concatenate([s['rr'][s['vertno']] for s in src_from])
    dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1)
    mu = np.mean(dists)
    # fsaverage=5.99; 7.97 without additional src_ras_t fix
    # fsaverage=7.97; 25.4 without src_ras_t fix
    assert lower <= mu < upper, f'round-trip distance {mu}'
    # check that pre_affine is close to identity when subject_to==subject_from
    if subject_to == subject_from:
        for morph in (morph_to_from, morph_from_to):
            assert_allclose(morph.pre_affine.affine, np.eye(4), atol=1e-2)
    # check that power is more or less preserved (labelizing messes with this)
    if morph_mat:
        if subject_to == 'fsaverage':
            limits = (14.0, 14.2)
        else:
            limits = (7, 7.5)
    else:
        limits = (1, 1.2)
    stc_from_unit = stc_from.copy().crop(0, 0)
    stc_from_unit._data.fill(1.)
    stc_from_unit_rt = morph_to_from.apply(
        morph_from_to.apply(stc_from_unit))
    assert_power_preserved(stc_from_unit, stc_from_unit_rt, limits=limits)
    if morph_mat:
        # save/load round trip and explicit vol_morph_mat computation
        fname = tmpdir.join('temp-morph.h5')
        morph_to_from.save(fname)
        morph_to_from = read_source_morph(fname)
        assert morph_to_from.vol_morph_mat is None
        morph_to_from.compute_vol_morph_mat(verbose=True)
        morph_to_from.save(fname, overwrite=True)
        morph_to_from = read_source_morph(fname)
        assert isinstance(morph_to_from.vol_morph_mat, csr_matrix), 'csr'
        # equivalence (plus automatic calling)
        assert morph_from_to.vol_morph_mat is None
        monkeypatch.setattr(mne.morph, '_VOL_MAT_CHECK_RATIO', 0.)
        with catch_logging() as log:
            with pytest.warns(RuntimeWarning, match=r'calling morph\.compute'):
                stc_from_rt_lin = morph_to_from.apply(
                    morph_from_to.apply(stc_from, verbose='debug'))
        assert isinstance(morph_from_to.vol_morph_mat, csr_matrix), 'csr'
        log = log.getvalue()
        assert 'sparse volume morph matrix' in log
        # sparse-matrix path must match the dense morph results
        assert_allclose(stc_from_rt.data, stc_from_rt_lin.data)
        del stc_from_rt_lin
        stc_from_unit_rt_lin = morph_to_from.apply(
            morph_from_to.apply(stc_from_unit))
        assert_allclose(stc_from_unit_rt.data, stc_from_unit_rt_lin.data)
        del stc_from_unit_rt_lin
    del stc_from, stc_from_rt
    # before and after morph, check the proportion of vertices
    # that are inside and outside the brainmask.mgz
    brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz'))
    mask = _get_img_fdata(brain) > 0
    if subject_from == subject_to == 'sample':
        for stc in [stc_from_unit, stc_from_unit_rt]:
            img = stc.as_volume(src_from, mri_resolution=True)
            img = nib.Nifti1Image(  # abs to convert complex
                np.abs(_get_img_fdata(img)[:, :, :, 0]), img.affine)
            img = _get_img_fdata(resample_from_to(img, brain, order=1))
            assert img.shape == mask.shape
            in_ = img[mask].astype(bool).mean()
            out = img[~mask].astype(bool).mean()
            if morph_mat:
                out_max = 0.001
                in_min, in_max = 0.005, 0.007
            else:
                out_max = 0.02
                in_min, in_max = 0.97, 0.98
            assert out < out_max, f'proportion out of volume {out}'
            assert in_min < in_ < in_max, f'proportion inside volume {in_}'
SOMA_all_stc_diff = stc_sum / len(sub_dict) # make data array for cluster permutation stats RELAX-TENSE stc vals X_diff = np.array(X_diff).squeeze() # calculate Pearson's r for each vertex to Behavioral variable of the subject X_Rval = np.empty(X_diff.shape[1]) X_R_Tval = np.empty(X_diff.shape[1]) for vert_idx in range(X_diff.shape[1]): X_Rval[vert_idx], p = stats.pearsonr(X_diff[:,vert_idx],Behav) # calculate an according t-value for each r X_R_Tval = (X_Rval * np.sqrt((len(sub_dict)-2))) / np.sqrt(1 - X_Rval**2) # setup for clustering -- t-thresholds n_subjects=len(sub_dict) p_threshold = 0.05 t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1) src = mne.read_source_spaces("{}fsaverage-src.fif".format(mri_dir)) connectivity = mne.spatial_src_connectivity(src) # find clusters in the T-vals clusters, cluster_stats = _find_clusters(X_R_Tval,threshold=t_threshold, connectivity=connectivity, tail=0) # plot uncorrected correlation t-values on fsaverage X_R_Tval = np.expand_dims(X_R_Tval, axis=1) SOMA_all_stc_diff.data = X_R_Tval SOMA_all_stc_diff.plot(subjects_dir=mri_dir,subject='fsaverage',surface='white',hemi='both',time_viewer=True,colormap='coolwarm',clim={'kind':'value','pos_lims':(0,1.5,3)}) # do the random sign flip permutation # setup n_perms = 500 cluster_H0 = np.zeros(n_perms) # here comes the loop
# analysis parameters
proc_dir = "../proc/"
spacing = "ico4"
conds = ["audio", "visselten", "visual"]
wavs = ["4000Hz", "4000cheby", "7000Hz", "4000fftf"]
band = "alpha_0"
indep_var = "Angenehm"
n_freqs = 1
n_srcs = 5124
n_subjs = len(subjs)

### permutation on betas
# number of random samples
perm_n = 100
# place holders for bootstrap samples
cluster_H0 = np.zeros(perm_n)

# setup connectivity (from the fsaverage source space, then freed)
fs_src = mne.read_source_spaces("{}{}_{}-src.fif".format(
    proc_dir, "fsaverage", spacing))
cnx = mne.spatial_src_connectivity(fs_src)
del fs_src
connectivity = _setup_connectivity(cnx, n_srcs, n_freqs)
# mask out excluded vertices
exclude = np.load("{}fsaverage_{}_exclude.npy".format(proc_dir, spacing))
include = np.ones(cnx.shape[0], dtype="bool")
include[exclude] = 0

# threshold for clustering (TFCE-style start/step)
threshold = dict(start=0, step=0.2)

#random_state = 42
random = np.random.RandomState()

# behavioral dataframes
df_laut = pd.read_pickle("../behave/laut")
df_ang = pd.read_pickle("../behave/ang")
# You can see that the fsaverage (purple) mesh is uniformly spaced, and the # mesh for subject "sample" (in cyan) has been deformed along the spherical # surface by # FreeSurfer. This deformation is designed to optimize the sulcal-gyral # alignment. # # Surface decimation # ------------------ # These surfaces have a lot of vertices, and in general we only need to use # a subset of these vertices for creating source spaces. A uniform sampling can # easily be achieved by subsampling in the spherical space. To do this, we # use a recursively subdivided icosahedron or octahedron. For example, let's # load a standard oct-6 source space, and at the same zoom level as before # visualize how it subsampled the dense mesh: src = mne.read_source_spaces( os.path.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')) print(src) # sphinx_gallery_thumbnail_number = 10 blue = '#4477AA' renderer = mne.viz.backends.renderer.create_3d_figure(size=(800, 800), scene=False, **renderer_kwargs) rr_sph, _ = mne.read_surface(fnames[0]) for tris, color in [(src[1]['tris'], cyan), (src[1]['use_tris'], blue)]: renderer.mesh(*rr_sph.T, triangles=tris, color=color, representation='wireframe') mne.viz.set_3d_view(figure=renderer.figure, distance=20, **view_kwargs) renderer.show()
# setting some parameters os.chdir(filedir + '/' + SubjList[0] + '/' + ExpID + '/Datafiles/EpochData') Epochs = mne.read_epochs('ProperEpochData-epo.fif', preload=True) epochs = Epochs.copy().pick_types(meg=True) conditions = list(epochs.event_id.keys()) conditions2 = [i for i in conditions if i != 'target'] times = epochs.times sfreq = epochs.info['sfreq'] del Epochs, epochs MRIsubject = 'fsaverage' subjects_dir = '' src = mne.read_source_spaces(subjects_dir + '/' + MRIsubject + '/bem/%s-%s-src.fif' % (MRIsubject, useFsaveModel)) nuseVerts = src[-1]['nuse'] Tmin = -0.1 Tmax = 0.3 timemask = np.where((Tmin <= times) & (times <= Tmax))[0] #--- load grand-average data ---# print('\n<< Load Grand-average Datasets (%s Exp, N=%d, method:%s) >>' % (ExpID, SubjN, usedMethod)) srcdir1 = 'SurfSrcEst_dSPM_forEvoked' if useFsaveModel == 'ico5': os.chdir(filedir + '/GrandAverage/Datafiles/forSurfaceSrcEstAnalysis/' + srcdir1 + '/%sexp_N%d' % (ExpID, SubjN)) else:
def test_scale_mri(tmp_path, few_surfaces, scale):
    """Test creating fsaverage and scaling it."""
    # create fsaverage using the testing "fsaverage" instead of the
    # FreeSurfer one
    tempdir = str(tmp_path)
    fake_home = data_path
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    # removing the fiducials and updating must restore them
    fid_path = op.join(tempdir, 'fsaverage', 'bem',
                       'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(fake_home, 'subjects', 'sample', 'mri', 'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
                                 add_dist=False)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space('fsaverage', pos=50, mri=mri,
                                         subjects_dir=tempdir,
                                         add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage
    write_source_spaces(path % 'ico-0', src, overwrite=True)
    with _record_warnings():  # sometimes missing nibabel
        scale_mri('fsaverage', 'flachkopf', scale, True,
                  subjects_dir=tempdir, verbose='debug')
    assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
    spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')

    assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
    assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
                                       'lh.sphere.reg'))
    vsrc_s = mne.read_source_spaces(spath % 'vol-50')
    # voxel -> MRI transform must be consistent before and after scaling
    for vox in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 2, 3]):
        idx = np.ravel_multi_index(vox, vsrc[0]['shape'], order='F')
        err_msg = f'idx={idx} @ {vox}, scale={scale}'
        assert_allclose(apply_trans(vsrc[0]['src_mri_t'], vox),
                        vsrc[0]['rr'][idx], err_msg=err_msg)
        assert_allclose(apply_trans(vsrc_s[0]['src_mri_t'], vox),
                        vsrc_s[0]['rr'][idx], err_msg=err_msg)
    scale_labels('flachkopf', subjects_dir=tempdir)

    # add distances to source space after hacking the properties to make
    # it run *much* faster
    src_dist = src.copy()
    for s in src_dist:
        s.update(rr=s['rr'][s['vertno']], nn=s['nn'][s['vertno']],
                 tris=s['use_tris'])
        s.update(np=len(s['rr']), ntri=len(s['tris']),
                 vertno=np.arange(len(s['rr'])),
                 inuse=np.ones(len(s['rr']), int))
    mne.add_source_space_distances(src_dist)
    write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
    assert ssrc[0]['nearest'] is not None

    # check patch info computation (only if SciPy is new enough to be fast)
    if check_version('scipy', '1.3'):
        for s in src_dist:
            for key in ('dist', 'dist_limit'):
                s[key] = None
        write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

        # scale with distances
        os.remove(spath % 'ico-0')
        scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
        ssrc = mne.read_source_spaces(spath % 'ico-0')
        assert ssrc[0]['dist'] is None
        assert ssrc[0]['nearest'] is not None
# %% # Next, we'll get the epoch data and plot its amplitude over time. epochs.plot() # %% # We can visualize this raw data on the ``fsaverage`` brain (in MNI space) as # a heatmap. This works by first creating an ``Evoked`` data structure # from the data of interest (in this example, it is just the raw LFP). # Then one should generate a ``stc`` data structure, which will be able # to visualize source activity on the brain in various different formats. # get standard fsaverage volume (5mm grid) source space fname_src = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-vol-5-src.fif') vol_src = mne.read_source_spaces(fname_src) evoked = epochs.average() stc = mne.stc_near_sensors( evoked, trans, 'fsaverage', subjects_dir=subjects_dir, src=vol_src, verbose='error') # ignore missing electrode warnings stc = abs(stc) # just look at magnitude clim = dict(kind='value', lims=np.percentile(abs(evoked.data), [10, 50, 75])) # %% # Plot 3D source (brain region) visualization: # # By default, `stc.plot_3d() <mne.VolSourceEstimate.plot_3d>` will show a time # course of the source with the largest absolute value across any time point. # In this example, it is simply the source with the largest raw signal value. # Its location is marked on the brain by a small blue sphere.
lambda2=2) stcs_low_base = compute_source_psd_epochs(epochs_base["low"], inverse_operator, fmin=2, fmax=30, lambda2=2) stcs_high_base = compute_source_psd_epochs(epochs_base["high"], inverse_operator, fmin=2, fmax=30, lambda2=2) src_path = dirs.fsf_subjects / "fsaverage/bem/fsaverage-oct-6-src.fif" src = read_source_spaces(src_path) fsave_vertices = [s["vertno"] for s in src] morph = compute_source_morph( src=inverse_operator["src"], subject_to="fsaverage", spacing=fsave_vertices, subjects_dir=dirs.fsf_subjects, ) subj_dir = dirs.sources / f"sub-{subj}" subj_dir.mkdir(exist_ok=True) for i, s in enumerate(stcs_high_act): s /= stcs_high_base[i] morph.apply(s).save(subj_dir / f"sub-{subj}_cond-high_trial-{i}") for i, s in enumerate(stcs_low_act):
def test_setup_source_space():
    """Test setting up ico, oct, and all source spaces."""
    tempdir = _TempDir()
    fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                        'fsaverage-ico-5-src.fif')
    # first lets test some input params: malformed spacing strings
    pytest.raises(ValueError, setup_source_space, 'sample', spacing='oct',
                  add_dist=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, setup_source_space, 'sample', spacing='octo',
                  add_dist=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, setup_source_space, 'sample', spacing='oct6e',
                  add_dist=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, setup_source_space, 'sample', spacing='7emm',
                  add_dist=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, setup_source_space, 'sample', spacing='alls',
                  add_dist=False, subjects_dir=subjects_dir)

    # ico 5 (fsaverage) - write to temp file
    src = read_source_spaces(fname_ico)
    with pytest.warns(None):  # sklearn equiv neighbors
        src_new = setup_source_space('fsaverage', spacing='ico5',
                                     subjects_dir=subjects_dir,
                                     add_dist=False)
    _compare_source_spaces(src, src_new, mode='approx')
    assert_equal(repr(src), repr(src_new))
    assert_equal(repr(src).count('surface ('), 2)
    # ico-5 on fsaverage keeps all 10242 vertices per hemisphere
    assert_array_equal(src[0]['vertno'], np.arange(10242))
    assert_array_equal(src[1]['vertno'], np.arange(10242))

    # oct-6 (sample) - auto filename + IO
    src = read_source_spaces(fname)
    temp_name = op.join(tempdir, 'temp-src.fif')
    with pytest.warns(None):  # sklearn equiv neighbors
        src_new = setup_source_space('sample', spacing='oct6',
                                     subjects_dir=subjects_dir,
                                     add_dist=False)
    write_source_spaces(temp_name, src_new, overwrite=True)
    assert_equal(src_new[0]['nuse'], 4098)
    _compare_source_spaces(src, src_new, mode='approx', nearest=False)
    src_new = read_source_spaces(temp_name)
    _compare_source_spaces(src, src_new, mode='approx', nearest=False)

    # all source points - no file writing
    src_new = setup_source_space('sample', spacing='all',
                                 subjects_dir=subjects_dir, add_dist=False)
    assert src_new[0]['nuse'] == len(src_new[0]['rr'])
    assert src_new[1]['nuse'] == len(src_new[1]['rr'])

    # dense source space to hit surf['inuse'] lines of _create_surf_spacing
    pytest.raises(RuntimeError, setup_source_space, 'sample',
                  spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
def test_volume_stc():
    """Test volume STCs: construction, save/read round-trips, nifti export."""
    tempdir = _TempDir()
    N = 100
    data = np.arange(N)[:, np.newaxis]
    # three vertex layouts: 1D, column vector, and a short 2-vertex case;
    # after reading back, vertices should always come out 1D
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = VolSourceEstimate(data, vertno, 0, 1)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        # save/read twice to check the round-trip is stable
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(vertno_read, stc_new.vertices)
            assert_array_almost_equal(stc.data, stc_new.data)
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_true('sample' in repr(stc))
    stc_new = stc
    # unknown file type must be rejected
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    # round-trip twice through the 'w' format as well
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(isinstance(stc_new, VolSourceEstimate))
        assert_array_equal(stc.vertices, stc_new.vertices)
        assert_array_almost_equal(stc.data, stc_new.data)
    # save the stc as a nifti file and export
    try:
        import nibabel as nib
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            src = read_source_spaces(fname_vsrc)
        vol_fname = op.join(tempdir, 'stc.nii.gz')
        # source-space resolution: image shape matches the source grid
        stc.save_as_volume(vol_fname, src, dest='surf', mri_resolution=False)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == src[0]['shape'] + (len(stc.times), ))
        with warnings.catch_warnings(record=True):  # nib<->numpy
            t1_img = nib.load(fname_t1)
        # MRI resolution: image shape/affine match the subject's T1
        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src, dest='mri',
                           mri_resolution=True)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == t1_img.shape + (len(stc.times), ))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
        # export without saving
        img = stc.as_volume(src, dest='mri', mri_resolution=True)
        assert_true(img.shape == t1_img.shape + (len(stc.times), ))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
    except ImportError:
        print('Save as nifti test skipped, needs NiBabel')
} sub_key = {v: k for k, v in mri_key.items()} sub = ["ATT_10"] subjects_dir = "/home/jeff/freesurfer/subjects/" proc_dir = "../proc/" all_conds = [["audio", "rest"], ["visual", "rest"], ["audio", "visual"], ["visselten", "audio"], ["zaehlen", "rest"]] all_conds = [["visselten", "audio"]] threshold = 0.99 lower = 3e-27 upper = 3e-26 avg_clim = {"kind": "value", "lims": [lower, (upper - lower) / 2, upper]} avg_clim = "auto" src = mne.read_source_spaces(proc_dir + sub + "-src.fif") filename = proc_dir + "stcs/" + sub for conds in all_conds: stc_a = mne.read_source_estimate("{dir}stcs/nc_{a}_{b}_mean-lh.stc".format( dir=proc_dir, a=sub, b=conds[0])) stc_b = mne.read_source_estimate("{dir}stcs/nc_{a}_{b}_mean-lh.stc".format( dir=proc_dir, a=sub, b=conds[1])) stc_c = stc_a - stc_b favg = mlab.figure() stc_c.plot(sub_key[sub], hemi="both", figure=favg, clim=avg_clim) cnx = mne.spatial_src_connectivity(src) X = [ np.load("{dir}stcs/nc_{a}_{b}_stc.npy".format(dir=proc_dir, a=sub, b=conds[0])), np.load("{dir}stcs/nc_{a}_{b}_stc.npy".format(dir=proc_dir,
def test_combine_source_spaces():
    """Test combining surface, volume, and discrete source spaces."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    # pick two random aseg labels (only the first is used below)
    volume_labels = [label_names[int(np.random.rand() * len(label_names))]
                     for ii in range(2)]
    # get a surface source space (no need to test creation here)
    srf = read_source_spaces(fname, patch_stats=False)
    # setup 2 volume source spaces
    # NOTE(review): despite the comment above, only one volume source space
    # is created here (volume_labels[0]) — confirm intent.
    vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_labels[0],
                                    mri=aseg_fname, add_interpolator=False)
    # setup a discrete source space
    rr = rng.randint(0, 20, (100, 3)) * 1e-3
    nn = np.zeros(rr.shape)
    nn[:, -1] = 1
    pos = {'rr': rr, 'nn': nn}
    disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                     pos=pos, verbose='error')
    # combine source spaces
    src = srf + vol + disc
    # test addition of source spaces (2 surface hemis + 1 volume + 1 discrete)
    assert_equal(type(src), SourceSpaces)
    assert_equal(len(src), 4)
    # test reading and writing
    src_out_name = op.join(tempdir, 'temp-src.fif')
    src.save(src_out_name)
    src_from_file = read_source_spaces(src_out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
    assert_equal(repr(src), repr(src_from_file))
    assert_equal(src.kind, 'mixed')
    # test that all source spaces are in MRI coordinates
    coord_frames = np.array([s['coord_frame'] for s in src])
    assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()
    # test errors for export_volume
    image_fname = op.join(tempdir, 'temp-image.mgz')
    # source spaces with no volume
    pytest.raises(ValueError, srf.export_volume, image_fname, verbose='error')
    # unrecognized source type
    disc2 = disc.copy()
    disc2[0]['type'] = 'kitty'
    src_unrecognized = src + disc2
    pytest.raises(ValueError, src_unrecognized.export_volume, image_fname,
                  verbose='error')
    # unrecognized file type
    bad_image_fname = op.join(tempdir, 'temp-image.png')
    # vertices outside vol space warning
    pytest.raises(ValueError, src.export_volume, bad_image_fname,
                  verbose='error')
    # mixed coordinate frames
    disc3 = disc.copy()
    disc3[0]['coord_frame'] = 10
    src_mixed_coord = src + disc3
    pytest.raises(ValueError, src_mixed_coord.export_volume, image_fname,
                  verbose='error')
############################################################################### # Morph the output to fsaverage # ----------------------------- # # We can also use volumetric morphing to get the data to fsaverage space. This # is for example necessary when comparing activity across subjects. Here, we # will use the scalar beamformer example. # We pass a :class:`mne.SourceMorph` as the ``src`` argument to # `mne.VolSourceEstimate.plot`. To save some computational load when applying # the morph, we will crop the ``stc``: fetch_fsaverage(subjects_dir) # ensure fsaverage src exists fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif' src_fs = mne.read_source_spaces(fname_fs_src) morph = mne.compute_source_morph( src, subject_from='sample', src_to=src_fs, subjects_dir=subjects_dir, niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], # just for speed verbose=True) stc_fs = morph.apply(stc) del stc stc_fs.plot(src=src_fs, mode='stat_map', initial_time=0.085, subjects_dir=subjects_dir,
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')
    n_verts_fs = sum(len(s['vertno']) for s in src_fs)
    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)
    # morphing the source space must preserve the vertex count
    n_verts_sample = sum(len(s['vertno']) for s in src_morph)
    assert n_verts_fs == n_verts_sample
    # Morph the data over using standard methods
    stc_morph = compute_source_morph(
        src_fs, 'fsaverage', 'sample',
        spacing=[s['vertno'] for s in src_morph], smooth=1,
        subjects_dir=subjects_dir, warn=False).apply(stc_fs)
    assert stc_morph.data.shape[0] == n_verts_sample
    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')
    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)
    # This should fail (has too many verts in SourceMorph)
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        morph = compute_source_morph(
            src_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match='vertices do not match'):
        morph.apply(stc_morph)
    # Compare to the original data
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        stc_morph_morph = compute_source_morph(
            src=stc_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir).apply(stc_morph)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr
    # Explicitly test having two vertices map to the same target vertex. We
    # simulate this by having two vertices be at the same position.
    src_fs2 = src_fs.copy()
    vert1, vert2 = src_fs2[0]['vertno'][:2]
    src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
    stc_morph_return = stc_morph.to_original_src(
        src_fs2, subjects_dir=subjects_dir)
    # test to_original_src method result equality
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr
    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    pytest.raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    pytest.raises(ValueError, stc_morph.to_original_src, src_fs,
                  subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    pytest.raises(ValueError, stc_morph.to_original_src, src_fs,
                  subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    pytest.raises(RuntimeError, stc_morph.to_original_src, src,
                  subjects_dir=subjects_dir)
# band_info["gamma_0"] = {"freqs":list(np.arange(31,41)),"cycles":9} # band_info["gamma_1"] = {"freqs":list(np.arange(41,60)),"cycles":9} # band_info["gamma_2"] = {"freqs":list(np.arange(60,90)),"cycles":9} runs = ["rest", "audio", "visual", "visselten"] wavs = ["4000fftf", "4000Hz", "7000Hz", "4000cheby"] subjects_dir = "/home/jeff/freesurfer/subjects/" n_jobs = 1 spacing = "ico4" for k, v in band_info.items(): f = v["freqs"] c = v["cycles"] frequencies = [f for x in range(5)] print(frequencies) for sub in subjs: src = mne.read_source_spaces("{dir}{sub}_{sp}-src.fif".format( dir=proc_dir, sub=sub, sp=spacing)) all_bads = [] epos = [] epo_names = [] epo_conds = [] epo_cond_names = [] for run_idx, run in enumerate(runs): if run == "rest": epo_name = "{dir}nc_{sub}_{run}_hand-epo.fif".format( dir=proc_dir, sub=sub, run=run) epo = mne.read_epochs(epo_name) all_bads += epo.info["bads"] epo_conds.append(epo) epo_cond_names.append(run) else: wav_epos = []
# :class:`src <mne.SourceSpaces>`, # and ``subject_to`` is set to 'fsaverage' by default. ``subjects_dir`` can be # None when set in the environment. In that case SourceMorph can be initialized # taking ``src`` as only argument. See :class:`mne.SourceMorph` for more # details. # # The default parameter setting for *zooms* will cause the reference volumes # to be resliced before computing the transform. A value of '5' would cause # the function to reslice to an isotropic voxel size of 5 mm. The higher this # value the less accurate but faster the computation will be. # # The recommended way to use this is to morph to a specific destination source # space so that different ``subject_from`` morphs will go to the same space.` # A standard usage for volumetric data reads: src_fs = mne.read_source_spaces(fname_src_fsaverage) morph = mne.compute_source_morph( inverse_operator['src'], subject_from='sample', subjects_dir=subjects_dir, niter_affine=[10, 10, 5], niter_sdr=[10, 10, 5], # just for speed src_to=src_fs, verbose=True) # %% # Apply morph to VolSourceEstimate # -------------------------------- # # The morph can be applied to the source estimate data, by giving it as the # first argument to the :meth:`morph.apply() <mne.SourceMorph.apply>` method.
stc_sum = all_diff_plot.pop() for stc in all_diff_plot: stc_sum = stc_sum + stc NEM_all_stc_diff = stc_sum / len(subjs) # make data array for cluster permutation stats N-P stc vals X_diff = np.array(X_diff).squeeze() # calculate Pearson's r for each vertex to Behavioral variable of the subject X_Rval = np.empty(X_diff.shape[1]) X_R_Tval = np.empty(X_diff.shape[1]) for vert_idx in range(X_diff.shape[1]): X_Rval[vert_idx], p = stats.pearsonr(X_diff[:,vert_idx],Behav) # calculate an according t-value for each r X_R_Tval = (X_Rval * np.sqrt((len(subjs)-2))) / np.sqrt(1 - X_Rval**2) # setup for clustering threshold = 3.850 src = mne.read_source_spaces("{}fsaverage_ico5-src.fif".format(meg_dir)) connectivity = mne.spatial_src_connectivity(src) # find clusters in the T-vals clusters, cluster_stats = _find_clusters(X_R_Tval,threshold=threshold, connectivity=connectivity, tail=0) # plot uncorrected correlation t-values on fsaverage X_R_Tval = np.expand_dims(X_R_Tval, axis=1) NEM_all_stc_diff.data = X_R_Tval NEM_all_stc_diff.plot(subjects_dir=mri_dir,subject='fsaverage',surface='white',hemi='both',time_viewer=True,colormap='coolwarm',clim={'kind':'value','pos_lims':(2,4,6)}) # do the random sign flip permutation # setup n_perms = 1000 cluster_H0 = np.zeros(n_perms) # here comes the loop
temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward' shutil.copytree(temp_src, temp_dest) trans = session1[n] + '-trans.fif' # Take a look at the sensors # mne.viz.plot_trans(info, trans, subject=subs[n], dig=True, # meg_sensors=True, subjects_dir=fs_dir) ### Read source space # spacing='oct6' #'ico5' # 10242 * 2 fn2 = subject + '-' + 'ico-5' + '-src.fif' # ico-5 if s == '205_ac151123' or s == '205_ac160202' or s == 'nlr_jb227170811': # NLR_205 has too small head for ico-5 fn2 = subject + '-' + 'oct-6' + '-src.fif' os.chdir(os.path.join(fs_dir, subject, 'bem')) src = mne.read_source_spaces(fn2) os.chdir(os.path.join(raw_dir, session1[n])) os.chdir('forward') #import numpy as np # noqa #from mayavi import mlab # noqa #from surfer import Brain # noqa # #brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir) #surf = brain._geo # #vertidx = np.where(src[0]['inuse'])[0] # #mlab.points3d(surf.x[vertidx], surf.y[vertidx], # surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
# raw.add_proj(ssp_ecg_eog, remove_existing=True) # raw_erm.add_proj(ssp_ecg_eog) # mne.viz.plot_projs_topomap(raw.info['projs'], info=raw.info) ############################################################################## # Explore data fig = mne.viz.plot_projs_topomap(raw.info['projs'][:-1], info=raw.info) fig.subplots_adjust(0.07, 0.07, 0.9, 0.8, 0.2, .2) n_fft = next_fast_len(int(round(4 * new_sfreq))) print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raw.info['sfreq'])) raw.plot_psd(n_fft=n_fft, proj=True, n_jobs=18) ############################################################################## # Make forward stack and get transformation matrix src = mne.read_source_spaces(src_fname) bem = mne.read_bem_solution(bem_fname) trans = mne.read_trans(trans_fname) # check alignment fig = mne.viz.plot_alignment(raw.info, trans=trans, subject=subject, subjects_dir=subjects_dir, dig=True, coord_frame='meg') mlab.view(0, 90, focalpoint=(0., 0., 0.), distance=0.6, figure=fig) fwd = mne.make_forward_solution(raw.info, trans, src=src, bem=bem,
def test_label_center_of_mass():
    """Test computing the center of mass of a label."""
    # Zero out the left-hemisphere data so only the right hemisphere
    # contributes; the STC's center of mass must hit a known vertex.
    stc = read_source_estimate(stc_fname)
    stc.lh_data[:] = 0
    stc_vert = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
    assert_equal(stc_vert, 124791)
    # A Label built from the same RH data must agree with the STC result.
    rh_label = Label(stc.vertices[1], pos=None,
                     values=stc.rh_data.mean(axis=1),
                     hemi='rh', subject='sample')
    assert_equal(rh_label.center_of_mass(subjects_dir=subjects_dir),
                 stc_vert)
    labels = read_labels_from_annot('sample', parc='aparc.a2009s',
                                    subjects_dir=subjects_dir)
    src = read_source_spaces(src_fname)
    # Try a couple of random ones, one from left and one from right
    # Visually verified in about the right place using mne_analyze
    for lbl, want in zip([labels[2], labels[3], labels[-5]],
                         [141162, 145221, 55979]):
        # non-positive label values are invalid input
        for bad_value in (-1, 0):
            lbl.values[:] = bad_value
            assert_raises(ValueError, lbl.center_of_mass,
                          subjects_dir=subjects_dir)
        lbl.values[:] = 1
        assert_equal(lbl.center_of_mass(subjects_dir=subjects_dir), want)
        assert_equal(lbl.center_of_mass(subjects_dir=subjects_dir,
                                        restrict_vertices=lbl.vertices),
                     want)
        # restrict to source space
        hemi_idx = 0 if lbl.hemi == 'lh' else 1
        # this simple nearest version is not equivalent, but is probably
        # close enough for many labels (including the test ones):
        delta = lbl.pos[np.where(lbl.vertices == want)[0][0]]
        delta = src[hemi_idx]['rr'][src[hemi_idx]['vertno']] - delta
        nearest = np.argmin(np.sum(delta * delta, axis=1))
        src_want = src[hemi_idx]['vertno'][nearest]
        # see if we actually get the same one
        src_restrict = np.intersect1d(lbl.vertices,
                                      src[hemi_idx]['vertno'])
        assert_equal(lbl.center_of_mass(subjects_dir=subjects_dir,
                                        restrict_vertices=src_restrict),
                     src_want)
        assert_equal(lbl.center_of_mass(subjects_dir=subjects_dir,
                                        restrict_vertices=src),
                     src_want)
    # degenerate cases
    assert_raises(ValueError, lbl.center_of_mass,
                  subjects_dir=subjects_dir, restrict_vertices='foo')
    assert_raises(TypeError, lbl.center_of_mass,
                  subjects_dir=subjects_dir, surf=1)
    assert_raises(IOError, lbl.center_of_mass,
                  subjects_dir=subjects_dir, surf='foo')
os.chdir(filedir + '/' + SubjID + '/' + ExpID) covfile = 'NoiseCov_fromEmptyRoom-cov.fif' NoiseCov = mne.read_cov(covfile) #- forward solution setting -# os.chdir(filedir + '/' + SubjID) fwdfile = 'MixedSourceSpace_%s_3shell_forICA-fwd.fif' % SubjID fwd = mne.read_forward_solution(fwdfile) # convert to surface-based source orientation fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=False) # preparation for subcortical sources srcfile = [i for i in os.listdir(os.getcwd()) if '_forICA-oct-6-src.fif' in i][0] srcspace = mne.read_source_spaces(srcfile) nvert_insurf = srcspace[0]['nuse'] + srcspace[1]['nuse'] vertices = [srcspace[0]['vertno'], srcspace[1]['vertno']] locVal = locals() vollabels = mne.get_volume_labels_from_src(srcspace, MRIsubject, subjects_dir) nVert = [len(i.vertices) for i in vollabels] vertNo = np.cumsum(nVert) # make inverse operators: InvOperator = make_inverse_operator(rawdata.info, forward=fwd, noise_cov=NoiseCov, loose='auto', depth=0.8, fixed=False,
def test_scale_mri_xfm(tmp_path, few_surfaces, subjects_dir_tmp_few):
    """Test scale_mri transforms and MRI scaling."""
    # scale fsaverage
    tempdir = str(subjects_dir_tmp_few)
    sample_dir = subjects_dir_tmp_few / 'sample'
    subject_to = 'flachkopf'
    spacing = 'oct2'
    for subject_from in ('fsaverage', 'sample'):
        if subject_from == 'fsaverage':
            scale = 1.  # single dim
        else:
            scale = [0.9, 2, .8]  # separate
        src_from_fname = op.join(tempdir, subject_from, 'bem',
                                 '%s-%s-src.fif' % (subject_from, spacing))
        src_from = mne.setup_source_space(
            subject_from, spacing, subjects_dir=tempdir, add_dist=False)
        write_source_spaces(src_from_fname, src_from)
        vertices_from = np.concatenate([s['vertno'] for s in src_from])
        assert len(vertices_from) == 36
        # FIX: the hemisphere labels must use each hemisphere's own vertex
        # count; the second term previously reused src_from[0], which is only
        # accidentally correct when both hemispheres have equal counts.
        hemis = ([0] * len(src_from[0]['vertno']) +
                 [1] * len(src_from[1]['vertno']))
        mni_from = mne.vertex_to_mni(vertices_from, hemis, subject_from,
                                     subjects_dir=tempdir)
        if subject_from == 'fsaverage':  # identity transform
            source_rr = np.concatenate(
                [s['rr'][s['vertno']] for s in src_from]) * 1e3
            assert_allclose(mni_from, source_rr)
        if subject_from == 'fsaverage':
            overwrite = skip_fiducials = False
        else:
            with pytest.raises(IOError, match='No fiducials file'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir)
            skip_fiducials = True
            with pytest.raises(IOError, match='already exists'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir,
                          skip_fiducials=skip_fiducials)
            overwrite = True
        if subject_from == 'sample':  # support for not needing all surf files
            os.remove(op.join(sample_dir, 'surf', 'lh.curv'))
        scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
                  verbose='debug', overwrite=overwrite,
                  skip_fiducials=skip_fiducials)
        if subject_from == 'fsaverage':
            assert _is_mri_subject(subject_to, tempdir), "Scaling failed"
        src_to_fname = op.join(tempdir, subject_to, 'bem',
                               '%s-%s-src.fif' % (subject_to, spacing))
        assert op.exists(src_to_fname), "Source space was not scaled"
        # Check MRI scaling
        fname_mri = op.join(tempdir, subject_to, 'mri', 'T1.mgz')
        assert op.exists(fname_mri), "MRI was not scaled"
        # Check MNI transform: scaling must not change the vertex identities
        # or their MNI coordinates
        src = mne.read_source_spaces(src_to_fname)
        vertices = np.concatenate([s['vertno'] for s in src])
        assert_array_equal(vertices, vertices_from)
        mni = mne.vertex_to_mni(vertices, hemis, subject_to,
                                subjects_dir=tempdir)
        assert_allclose(mni, mni_from, atol=1e-3)  # 0.001 mm
        # Check head_to_mni (the `trans` here does not really matter)
        trans = rotation(0.001, 0.002, 0.003) @ translation(0.01, 0.02, 0.03)
        trans = Transform('head', 'mri', trans)
        pos_head_from = np.random.RandomState(0).randn(4, 3)
        pos_mni_from = mne.head_to_mni(pos_head_from, subject_from, trans,
                                       tempdir)
        pos_mri_from = apply_trans(trans, pos_head_from)
        pos_mri = pos_mri_from * scale
        pos_head = apply_trans(invert_transform(trans), pos_mri)
        pos_mni = mne.head_to_mni(pos_head, subject_to, trans, tempdir)
        assert_allclose(pos_mni, pos_mni_from, atol=1e-3)
def test_scale_mri():
    """Test creating fsaverage and scaling it."""
    # create fsaverage using the testing "fsaverage" instead of the FreeSurfer
    # one
    tempdir = _TempDir()
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"
    # removing the fiducials and re-running with update=True must restore them
    fid_path = op.join(tempdir, 'fsaverage', 'bem',
                       'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"
    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)
    # remove redundant label files (keep only the first, for speed)
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)
    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
                                 add_dist=False)
    write_source_spaces(path % 'ico-0', src)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space(
        'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
        add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)
    # scale fsaverage (anisotropic scale factors)
    os.environ['_MNE_FEW_SURFACES'] = 'true'
    scale = np.array([1, .2, .8])
    scale_mri('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,
              verbose='debug')
    del os.environ['_MNE_FEW_SURFACES']
    assert _is_mri_subject('flachkopf', tempdir), "Scaling fsaverage failed"
    spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
    assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
    assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
                                       'lh.sphere.reg'))
    # the scaled volume src->MRI transform must map a scaled point to the
    # same place the original transform maps the unscaled point
    vsrc_s = mne.read_source_spaces(spath % 'vol-50')
    pt = np.array([0.12, 0.41, -0.22])
    assert_array_almost_equal(
        apply_trans(vsrc_s[0]['src_mri_t'], pt * scale),
        apply_trans(vsrc[0]['src_mri_t'], pt))
    scale_labels('flachkopf', subjects_dir=tempdir)
    # add distances to source space
    mne.add_source_space_distances(src)
    src.save(path % 'ico-0', overwrite=True)
    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
def SimulateRaw(amp1=50, amp2=100, freq=1., batch=1):
    """Create simulated raw data and events of two kinds

    Keyword Args:
        amp1 (float): amplitude of first condition effect
        amp2 (float): amplitude of second condition effect, null hypothesis
            amp1=amp2
        freq (float): Frequency of simulated signal 1. for ERP 10. for alpha
        batch (int): number of groups of 255 trials in each condition

    Returns:
        raw: simulated EEG MNE raw object with two event types
        event_id: dict of the two events for input to PreProcess()
    """
    data_path = sample.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
    trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
    src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
    bem_fname = (data_path +
                 '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
    # use the sample recording as the head model / sensor layout, EEG only
    raw_single = mne.io.read_raw_fif(raw_fname, preload=True)
    raw_single.set_eeg_reference(projection=True)
    raw_single = raw_single.crop(0., 255.)
    raw_single = raw_single.copy().pick_types(meg=False, eeg=True, eog=True,
                                              stim=True)
    # concatenate 4 raws together to make 1000 trials
    # NOTE(review): the same Raw instance is appended `batch` times (no
    # copy); if concatenate_raws mutates its inputs this could misbehave
    # for batch > 1 — confirm.
    raw = []
    for i in range(batch):
        raw.append(raw_single)
    raw = concatenate_raws(raw)
    epoch_duration = 1.

    def data_fun(amp, freq):
        """Create function to create fake signal"""
        def data_fun_inner(times):
            """Create fake signal with no noise"""
            n_samp = len(times)
            window = np.zeros(n_samp)
            # start=0, stop=n_samp//2: the Hamming window spans only the
            # first half of the epoch (the rest stays zero) — presumably
            # intentional, TODO confirm
            start, stop = [int(ii * float(n_samp) / 2) for ii in (0, 1)]
            window[start:stop] = np.hamming(stop - start)
            data = amp * 1e-9 * np.sin(2. * np.pi * freq * times)
            data *= window
            return data
        return data_fun_inner

    times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
    src = read_source_spaces(src_fname)
    # same random_state so both conditions use the same dipole location
    stc_zero = simulate_sparse_stc(src, n_dipoles=1, times=times,
                                   data_fun=data_fun(amp1, freq),
                                   random_state=0)
    stc_one = simulate_sparse_stc(src, n_dipoles=1, times=times,
                                  data_fun=data_fun(amp2, freq),
                                  random_state=0)
    raw_sim_zero = simulate_raw(raw, stc_zero, trans_fname, src, bem_fname,
                                cov='simple', blink=True, n_jobs=1,
                                verbose=True)
    raw_sim_one = simulate_raw(raw, stc_one, trans_fname, src, bem_fname,
                               cov='simple', blink=True, n_jobs=1,
                               verbose=True)
    # recode the second condition's trigger value from 1 to 2 on STI 014
    stim_pick = raw_sim_one.info['ch_names'].index('STI 014')
    raw_sim_one._data[stim_pick][np.where(
        raw_sim_one._data[stim_pick] == 1)] = 2
    raw = concatenate_raws([raw_sim_zero, raw_sim_one])
    event_id = {'CondZero': 1, 'CondOne': 2}
    return raw, event_id