def test_plot_evoked_field(renderer):
    """Render evoked field maps for MEG-only and for all channel types."""
    # Load a single condition and thin out channels so the test stays fast.
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])
    for sensor_type in ('meg', None):
        # A RuntimeWarning whose message mentions 'projection' is expected
        # while computing the field map.
        with pytest.warns(RuntimeWarning, match='projection'):
            maps = make_field_map(
                evoked, trans_fname, subject='sample',
                subjects_dir=subjects_dir, n_jobs=1, ch_type=sensor_type)
        evoked.plot_field(maps, time=0.1)
def test_plot_evoked_field():
    """Smoke-test Evoked.plot_field for MEG-only and combined field maps."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Keep only every 10th channel so the map computation is quick.
    subset = evoked.ch_names[::10]
    evoked = pick_channels_evoked(evoked, subset)
    for kind in ('meg', None):
        # Computing the map triggers a projection-related RuntimeWarning.
        with pytest.warns(RuntimeWarning, match='projection'):
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=kind)
        evoked.plot_field(maps, time=0.1)
def test_plot_evoked_field():
    """Exercise field-map plotting while recording (and discarding) warnings."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    picks = evoked.ch_names[::10]  # every 10th channel, for speed
    evoked = pick_channels_evoked(evoked, picks)
    for restriction in ('meg', None):
        # Record warnings so the one caused by the bad projection does not
        # leak to the test output.
        with warnings.catch_warnings(record=True):
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=restriction)
        evoked.plot_field(maps, time=0.1)
def test_plot_evoked_field():
    """Plot evoked field maps using an explicitly constructed trans file."""
    trans_fname = op.join(data_dir, "MEG", "sample",
                          "sample_audvis_raw-trans.fif")
    evoked = read_evokeds(evoked_fname, condition="Left Auditory",
                          baseline=(-0.2, 0.0))
    # Subsample the channel set so the test remains fast.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])
    for chan_type in ("meg", None):
        field_maps = make_field_map(
            evoked, trans_fname=trans_fname, subject="sample",
            subjects_dir=subjects_dir, n_jobs=1, ch_type=chan_type)
        evoked.plot_field(field_maps, time=0.1)
def test_plot_evoked_field():
    """Verify that evoked field maps can be computed and rendered."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    for sel in ['meg', None]:
        with warnings.catch_warnings(record=True):
            # Recording warnings silences the one caused by the bad proj.
            field_maps = make_field_map(evoked, trans_fname,
                                        subject='sample',
                                        subjects_dir=subjects_dir,
                                        n_jobs=1, ch_type=sel)
        evoked.plot_field(field_maps, time=0.1)
def test_plot_evoked_field():
    """Test plotting the evoked field map on the helmet/scalp surfaces."""
    trans_fname = op.join(data_dir, 'MEG', 'sample',
                          'sample_audvis_raw-trans.fif')
    evoked = io.read_evokeds(evoked_fname, condition='Left Auditory',
                             baseline=(-0.2, 0.0))
    # Keep only every 10th channel so the test runs quickly.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    # ch_type restriction: 'meg' only, then None (all available types).
    for t in ['meg', None]:
        maps = make_field_map(evoked, trans_fname=trans_fname,
                              subject='sample', subjects_dir=subjects_dir,
                              n_jobs=1, ch_type=t)
        evoked.plot_field(maps, time=0.1)
def test_plot_evoked_field():
    """Build field maps from a trans file and plot them on the surfaces."""
    trans_fname = op.join(data_dir, 'MEG', 'sample',
                          'sample_audvis_raw-trans.fif')
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Subsample the channels to keep runtime low.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])
    for chan_kind in ('meg', None):
        maps = make_field_map(evoked, trans_fname=trans_fname,
                              subject='sample', subjects_dir=subjects_dir,
                              n_jobs=1, ch_type=chan_kind)
        evoked.plot_field(maps, time=0.1)
def test_plot_evoked_field(renderer):
    """Test plotting evoked field."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Subsample channels so the field-map computation stays fast.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    for t in ['meg', None]:
        # make_field_map is expected to emit a projection RuntimeWarning.
        with pytest.warns(RuntimeWarning, match='projection'):
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=t)
        fig = evoked.plot_field(maps, time=0.1)
        # With the mayavi backend the returned figure should be a Scene.
        if renderer.get_3d_backend() == 'mayavi':
            import mayavi  # noqa: F401 analysis:ignore
            assert isinstance(fig, mayavi.core.scene.Scene)
def test_plot_evoked_field(backends_3d):
    """Test plotting evoked field."""
    # Which 3D backend the fixture selected for this run.
    backend_name = get_3d_backend()
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Subsample channels to keep the test fast.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    for t in ['meg', None]:
        # make_field_map is expected to emit a projection RuntimeWarning.
        with pytest.warns(RuntimeWarning, match='projection'):
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=t)
        fig = evoked.plot_field(maps, time=0.1)
        # With the mayavi backend the returned figure should be a Scene.
        if backend_name == 'mayavi':
            import mayavi  # noqa: F401 analysis:ignore
            assert isinstance(fig, mayavi.core.scene.Scene)
plt.show() # estimate noise covarariance noise_cov = mne.compute_covariance(epochs, tmax=0, method='shrunk', rank=None) ############################################################################### # Visualize fields on MEG helmet # The transformation here was aligned using the dig-montage. It's included in # the spm_faces dataset and is named SPM_dig_montage.fif. trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_' 'raw-trans.fif') maps = mne.make_field_map(evoked[0], trans_fname, subject='spm', subjects_dir=subjects_dir, n_jobs=1) evoked[0].plot_field(maps, time=0.170) ############################################################################### # Look at the whitened evoked daat evoked[0].plot_white(noise_cov) ############################################################################### # Compute forward model src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif' bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif' forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
This process can be computationally intensive. """ # Authors: Eric Larson <*****@*****.**> # Denis A. Engemann <*****@*****.**> # Alexandre Gramfort <*****@*****.**> # License: BSD (3-clause) print(__doc__) from mne.datasets import sample from mne import make_field_map, read_evokeds data_path = sample.data_path() subjects_dir = data_path + '/subjects' evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif' trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif' # If trans_fname is set to None then only MEG estimates can be visualized condition = 'Left Auditory' evoked = read_evokeds(evoked_fname, condition=condition, baseline=(-0.2, 0.0)) # Compute the field maps to project MEG and EEG data to MEG helmet # and scalp surface maps = make_field_map(evoked, trans_fname=trans_fname, subject='sample', subjects_dir=subjects_dir, n_jobs=1) # explore several points in time [evoked.plot_field(maps, time=time) for time in [0.09, .11]]
# coordinate systems of the MEG device and the head surface (based on the MRI). # You *can* compute 3D field maps without a ``trans`` file, but it will only # work for calculating the field *on the MEG helmet from the MEG sensors*. subjects_dir = os.path.join(sample_data_folder, 'subjects') sample_data_trans_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_raw-trans.fif') ############################################################################### # By default, MEG sensors will be used to estimate the field on the helmet # surface, while EEG sensors will be used to estimate the field on the scalp. # Once the maps are computed, you can plot them with :meth:`evoked.plot_field() # <mne.Evoked.plot_field>`: maps = mne.make_field_map(evks['aud/left'], trans=sample_data_trans_file, subject='sample', subjects_dir=subjects_dir) evks['aud/left'].plot_field(maps, time=0.1) ############################################################################### # You can also use MEG sensors to estimate the *scalp* field by passing # ``meg_surf='head'``. By selecting each sensor type in turn, you can compare # the scalp field estimates from each. for ch_type in ('mag', 'grad', 'eeg'): evk = evks['aud/right'].copy().pick(ch_type) _map = mne.make_field_map(evk, trans=sample_data_trans_file, subject='sample', subjects_dir=subjects_dir, meg_surf='head')
# we plot only the ``left_auditory``, and then we plot them all in the same # figure for comparison. Click on the individual plots to open them bigger. left_auditory.plot_topo() colors = 'yellow', 'green', 'red', 'blue' mne.viz.plot_evoked_topo(evoked, color=colors) ############################################################################### # Visualizing field lines in 3D # ----------------------------- # # We now compute the field maps to project MEG and EEG data to MEG helmet # and scalp surface. # # To do this we'll need coregistration information. See # :ref:`tut_forward` for more details. # # Here we just illustrate usage. subjects_dir = data_path + '/subjects' trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif' maps = mne.make_field_map(left_auditory, trans=trans_fname, subject='sample', subjects_dir=subjects_dir, n_jobs=1) # explore several points in time field_map = left_auditory.plot_field(maps, time=.1) ############################################################################### # .. note:: # If trans_fname is set to None then only MEG estimates can be visualized
# Diagnostic plots of the evoked response; only shown when the interactive
# preprocessing-step plots are requested.  ``== True`` comparisons replaced
# with plain truthiness tests (PEP 8 E712).
if plot_steps_preproc:
    evoked.plot(spatial_colors=True)
    evoked.plot(picks=['C3'], xlim=[-.1, .3])
    evoked.plot_joint(times=[.01, .03, .05])
    times = np.linspace(0, 0.06, 10)
    evoked.plot_topomap(times=times, colorbar=True)
    mne.viz.plot_compare_evokeds(evoked, picks='C3')
    mne.viz.plot_compare_evokeds(evoked, picks='eeg', axes='topo')
    # Project the evoked data onto the helmet/scalp surfaces in 3D.
    maps = mne.make_field_map(evoked, trans, subject=mri_partic,
                              subjects_dir=shared_dir)
    evoked.plot_field(maps, time=0.07)

# ================================
# COVARIANCE
# ================================
cov = mne.compute_covariance(epochs, tmax=0., method='auto', rank=None)
# save
# mne.write_cov(fname=raw_dir + '\\' + eeg_partic + '_cov_preproc2020.fif',
#               cov=cov)
if plot_steps_preproc:
    evoked.plot_white(cov, time_unit='s', verbose=False)

# if you want to try different methods:
# method_params = dict(diagonal_fixed=dict(eeg=0.01))
# Title template for the topographic plots; filled with the condition name.
title = 'MNE sample data (condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment)

# One color per condition in the combined topographic plot.
colors = 'yellow', 'green', 'red', 'blue'
mne.viz.plot_evoked_topo(evoked, color=colors,
                         title=title % 'Left/Right Auditory/Visual')

###############################################################################
# Visualizing field lines in 3D
# -----------------------------
#
# We now compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface.
#
# To do this we'll need coregistration information. See
# :ref:`tut_forward` for more details.
#
# Here we just illustrate usage.

subjects_dir = data_path + '/subjects'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'

maps = mne.make_field_map(evoked_l_aud, trans=trans_fname, subject='sample',
                          subjects_dir=subjects_dir, n_jobs=1)

# explore several points in time
field_map = evoked_l_aud.plot_field(maps, time=.1)

###############################################################################
# .. note::
#     If trans_fname is set to None then only MEG estimates can be visualized.
for e in evoked:
    e.plot(ylim=dict(mag=[-400, 400]))
plt.show()

# estimate noise covariance
noise_cov = mne.compute_covariance(epochs.crop(None, 0, copy=True))

###############################################################################
# Visualize fields on MEG helmet
trans_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'

maps = mne.make_field_map(evoked[0], trans_fname=trans_fname, subject='spm',
                          subjects_dir=subjects_dir, n_jobs=1)

evoked[0].plot_field(maps, time=0.170)

###############################################################################
# Compute forward model

# Make source space
src = mne.setup_source_space('spm', spacing='oct6',
                             subjects_dir=subjects_dir, overwrite=True)
# The trans file doubles as the MRI transform for the forward solution here.
mri = trans_fname
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, mri=mri, src=src, bem=bem)
sample_path = mne.datasets.sample.data_path()
subjects_dir = op.join(sample_path, 'subjects')
fname_evoked = op.join(sample_path, 'MEG', 'sample',
                       'sample_audvis-ave.fif')
fname_inv = op.join(sample_path, 'MEG', 'sample',
                    'sample_audvis-meg-oct-6-meg-inv.fif')
fname_trans = op.join(sample_path, 'MEG', 'sample',
                      'sample_audvis_raw-trans.fif')
inv = mne.minimum_norm.read_inverse_operator(fname_inv)
evoked = mne.read_evokeds(fname_evoked, baseline=(None, 0), proj=True,
                          verbose=False, condition='Left Auditory')
# MEG-only field map (ch_type='meg'), using the head<->MRI transform.
maps = mne.make_field_map(evoked, trans=fname_trans, ch_type='meg',
                          subject='sample', subjects_dir=subjects_dir)
time = 0.083
fig = mne.viz.create_3d_figure((256, 256))
# Draw the sensors and the pial surface into one 3D figure, then overlay
# the field map at the chosen time point on the same figure.
mne.viz.plot_alignment(evoked.info, subject='sample',
                       subjects_dir=subjects_dir, fig=fig,
                       trans=fname_trans, meg='sensors', eeg=False,
                       surfaces='pial', coord_frame='mri')
evoked.plot_field(maps, time=time, fig=fig, time_label=None, vmax=5e-13)
mne.viz.set_3d_view(fig,
from mne.datasets import sample
from mne import make_field_map, read_evokeds

print(__doc__)

data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
# If trans_fname is set to None then only MEG estimates can be visualized

condition = 'Left Auditory'
evoked = read_evokeds(evoked_fname, condition=condition,
                      baseline=(-0.2, 0.0))

# Compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface
maps = make_field_map(evoked, trans_fname, subject='sample',
                      subjects_dir=subjects_dir, n_jobs=1)

# Plot MEG and EEG fields in the helmet and scalp surface in the same figure.
evoked.plot_field(maps, time=0.11)

# Compute the MEG fields in the scalp surface
evoked.pick_types(meg=True, eeg=False)
maps_head = make_field_map(evoked, trans_fname, subject='sample',
                           subjects_dir=subjects_dir, n_jobs=1,
                           meg_surf='head')

# Plot MEG fields both in scalp surface and the helmet in the same figure.
# NOTE(review): this mixes maps_head[0] (MEG on scalp) with maps[1]
# (presumably the MEG/helmet map from the first call) — confirm the
# ordering of make_field_map's returned list.
evoked.plot_field([maps_head[0], maps[1]], time=0.11)
args = parser.parse_args()

# Script for creating an average field map.
data_path = "/home/custine/MEG/data/krns_kr3/9367/s5/"
evoked_fname = data_path + "ave_projon/9367_s5_Noun_Place_All-ave.fif"
subjects_dir = "/mnt/file1/binder/KRNS/anatomies/surfaces/"
trans_fname = "/mnt/file1/binder/KRNS/anatomies/surfaces/9367/mri/T1-neuromag/sets/COR-custine-140827-120918.fif"

# If trans_fname is set to None then only MEG estimates can be visualized
# NOTE(review): ``condition`` is assigned but never passed to read_evoked —
# confirm whether a set number / condition argument was intended here.
condition = 1
evoked = read_evoked(evoked_fname, baseline=(-0.2, 0.0))

# Compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface
maps = make_field_map(evoked, trans_fname=trans_fname, subject="9367",
                      subjects_dir=subjects_dir, n_jobs=1)

# Explore several points in time.  Use a plain for-loop rather than a list
# comprehension executed only for its plotting side effects.
for time in (0.09, 0.11):
    evoked.plot_field(maps, time=time)

evoked.save(
    "/home/custine/MEG/data/krns_kr3/9367/s5/results/fieldmaps/9367_s5_"
    + str(args.ave_name) + "_All-" + "-" + str(args.time1) + "-"
    + str(args.time2) + "-ave.fif"
)
# evoked.save(data_path + args.prefix+'-'+str(args.set)+'-'+str(args.time1)+'-'+str(args.time2)+'-ave.fif')
# Authors: Eric Larson <*****@*****.**>
#          Denis A. Engemann <*****@*****.**>
#          Alexandre Gramfort <*****@*****.**>
#
# License: BSD (3-clause)

print(__doc__)

from mne.datasets import sample
from mne import make_field_map, read_evokeds

data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
# If trans_fname is set to None then only MEG estimates can be visualized

condition = 'Left Auditory'
evoked = read_evokeds(evoked_fname, condition=condition,
                      baseline=(-0.2, 0.0))

# Compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface
maps = make_field_map(evoked, trans_fname=trans_fname, subject='sample',
                      subjects_dir=subjects_dir, n_jobs=1)

# Explore several points in time.  A plain loop replaces the former list
# comprehension, which built and discarded a list purely for its plotting
# side effects.
for time in (0.09, 0.11):
    evoked.plot_field(maps, time=time)