def test_info_no_rename_no_reorder_no_pdf():
    """Test private renaming, reordering and partial construction option."""
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        info, bti_info = _get_bti_info(
            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        # info2 is built without the pdf file -> acquisition metadata missing
        info2, bti_info = _get_bti_info(
            pdf_fname=None, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        assert_equal(info['ch_names'],
                     [ch['ch_name'] for ch in info['chs']])
        # with sort_by_ch_name=False the original 4D channel order is kept
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
                     ['A22', 'A2', 'A104', 'A241', 'A138'])
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
                     ['A133', 'A158', 'A44', 'A134', 'A216'])
        info = pick_info(info,
                         pick_types(info, meg=True, stim=True, resp=True))
        info2 = pick_info(info2,
                          pick_types(info2, meg=True, stim=True, resp=True))
        assert_true(info['sfreq'] is not None)
        assert_true(info['lowpass'] is not None)
        assert_true(info['highpass'] is not None)
        assert_true(info['meas_date'] is not None)
        # without the pdf these acquisition parameters cannot be known
        assert_equal(info2['sfreq'], None)
        assert_equal(info2['lowpass'], None)
        assert_equal(info2['highpass'], None)
        assert_equal(info2['meas_date'], None)
        # fixed: this assertion used to appear twice in a row
        assert_equal(info['ch_names'], info2['ch_names'])
        for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
            assert_array_equal(info[key]['trans'], info2[key]['trans'])
        assert_array_equal(
            np.array([ch['loc'] for ch in info['chs']]),
            np.array([ch['loc'] for ch in info2['chs']]))
        # just check reading data | corner case
        raw1 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            sort_by_ch_name=False, preload=True)
        # just check reading data | corner case
        raw2 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            rename_channels=False, sort_by_ch_name=True, preload=True)
        # reorder raw1 to raw2's (sorted) order before comparing data
        sort_idx = [raw1.bti_ch_labels.index(ch)
                    for ch in raw2.bti_ch_labels]
        raw1._data = raw1._data[sort_idx]
        assert_array_equal(raw1._data, raw2._data)
        assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_no_conversion():
    """ Test bti no-conversion option """
    # Freeze all BTi-specific keyword arguments; file names stay positional.
    get_info = partial(_get_bti_info,
                       rotation_x=0.0, translation=(0.0, 0.02, 0.11),
                       convert=False, ecg_ch='E31', eog_ch=('E63', 'E64'),
                       rename_channels=False, sort_by_ch_name=False)

    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        raw_info, _ = get_info(pdf, config, hs, convert=False)
        # converted reference for comparison
        raw_info_con = read_raw_bti(pdf_fname=pdf, config_fname=config,
                                    head_shape_fname=hs,
                                    convert=True, preload=False).info

        pick_info(raw_info_con,
                  pick_types(raw_info_con, meg=True, ref_meg=True),
                  copy=False)
        pick_info(raw_info,
                  pick_types(raw_info, meg=True, ref_meg=True), copy=False)
        bti_info = _read_bti_header(pdf, config)
        dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
        assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
        # without conversion the head transforms must stay identity
        assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
        assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
        dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)
        assert_array_equal(t['trans'], np.eye(4))

        # Unconverted digitization must match the raw headshape file and
        # differ from the converted one (spot-check the first ~11 points).
        for ii, (old, new, con) in enumerate(
                zip(dig, raw_info['dig'], raw_info_con['dig'])):
            assert_equal(old['ident'], new['ident'])
            assert_array_equal(old['r'], new['r'])
            assert_true(not np.allclose(old['r'], con['r']))
            if ii > 10:
                break

        ch_map = dict((ch['chan_label'], ch['loc'])
                      for ch in bti_info['chs'])

        for ii, ch_label in enumerate(raw_info['ch_names']):
            if not ch_label.startswith('A'):
                continue
            t1 = ch_map[ch_label]  # correction already performed in bti_info
            t2 = raw_info['chs'][ii]['loc']
            t3 = raw_info_con['chs'][ii]['loc']
            assert_allclose(t1, t2, atol=1e-15)
            assert_true(not np.allclose(t1, t3))

        # converted channels are in device coords, unconverted in 4D head
        idx_a = raw_info_con['ch_names'].index('MEG 001')
        idx_b = raw_info['ch_names'].index('A22')
        assert_equal(raw_info_con['chs'][idx_a]['coord_frame'],
                     FIFF.FIFFV_COORD_DEVICE)
        assert_equal(raw_info['chs'][idx_b]['coord_frame'],
                     FIFF.FIFFV_MNE_COORD_4D_HEAD)
def test_info_no_rename_no_reorder_no_pdf():
    """Test private renaming, reordering and partial construction option."""
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        info, bti_info = _get_bti_info(
            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        # info2 is built without the pdf file -> acquisition metadata missing
        info2, bti_info = _get_bti_info(
            pdf_fname=None, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        assert_equal(info['ch_names'],
                     [ch['ch_name'] for ch in info['chs']])
        # with sort_by_ch_name=False the original 4D channel order is kept
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
                     ['A22', 'A2', 'A104', 'A241', 'A138'])
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
                     ['A133', 'A158', 'A44', 'A134', 'A216'])
        info = pick_info(info,
                         pick_types(info, meg=True, stim=True, resp=True))
        info2 = pick_info(info2,
                          pick_types(info2, meg=True, stim=True, resp=True))
        assert (info['sfreq'] is not None)
        assert (info['lowpass'] is not None)
        assert (info['highpass'] is not None)
        assert (info['meas_date'] is not None)
        # without the pdf these acquisition parameters cannot be known
        assert_equal(info2['sfreq'], None)
        assert_equal(info2['lowpass'], None)
        assert_equal(info2['highpass'], None)
        assert_equal(info2['meas_date'], None)
        # fixed: this assertion used to appear twice in a row
        assert_equal(info['ch_names'], info2['ch_names'])
        for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
            assert_array_equal(info[key]['trans'], info2[key]['trans'])
        assert_array_equal(
            np.array([ch['loc'] for ch in info['chs']]),
            np.array([ch['loc'] for ch in info2['chs']]))
        # just check reading data | corner case
        raw1 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            sort_by_ch_name=False, preload=True)
        # just check reading data | corner case
        raw2 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            rename_channels=False, sort_by_ch_name=True, preload=True)
        # reorder raw1 to raw2's (sorted) order before comparing data
        sort_idx = [raw1.bti_ch_labels.index(ch)
                    for ch in raw2.bti_ch_labels]
        raw1._data = raw1._data[sort_idx]
        assert_array_equal(raw1._data, raw2._data)
        assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_no_conversion():
    """ Test bti no-conversion option """
    # Freeze all BTi-specific keyword arguments; file names stay positional.
    get_info = partial(
        _get_bti_info,
        rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
        ecg_ch='E31', eog_ch=('E63', 'E64'),
        rename_channels=False, sort_by_ch_name=False)

    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        with warnings.catch_warnings(record=True):  # weight tables
            raw_info, _ = get_info(pdf, config, hs, convert=False)
        with warnings.catch_warnings(record=True):  # weight tables
            # converted reference for comparison
            raw_info_con = read_raw_bti(
                pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
                convert=True, preload=False).info

        pick_info(raw_info_con,
                  pick_types(raw_info_con, meg=True, ref_meg=True),
                  copy=False)
        pick_info(raw_info,
                  pick_types(raw_info, meg=True, ref_meg=True), copy=False)
        bti_info = _read_bti_header(pdf, config)
        dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
        assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
        # without conversion the head transforms must stay identity
        assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
        assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
        dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)
        assert_array_equal(t['trans'], np.eye(4))

        # Unconverted digitization must match the raw headshape file and
        # differ from the converted one (spot-check the first ~11 points).
        for ii, (old, new, con) in enumerate(zip(
                dig, raw_info['dig'], raw_info_con['dig'])):
            assert_equal(old['ident'], new['ident'])
            assert_array_equal(old['r'], new['r'])
            assert_true(not np.allclose(old['r'], con['r']))
            if ii > 10:
                break

        ch_map = dict((ch['chan_label'], ch['loc'])
                      for ch in bti_info['chs'])

        for ii, ch_label in enumerate(raw_info['ch_names']):
            if not ch_label.startswith('A'):
                continue
            t1 = ch_map[ch_label]  # correction already performed in bti_info
            t2 = raw_info['chs'][ii]['loc']
            t3 = raw_info_con['chs'][ii]['loc']
            assert_allclose(t1, t2, atol=1e-15)
            assert_true(not np.allclose(t1, t3))

        # converted channels are in device coords, unconverted in 4D head
        idx_a = raw_info_con['ch_names'].index('MEG 001')
        idx_b = raw_info['ch_names'].index('A22')
        assert_equal(
            raw_info_con['chs'][idx_a]['coord_frame'],
            FIFF.FIFFV_COORD_DEVICE)
        assert_equal(
            raw_info['chs'][idx_b]['coord_frame'],
            FIFF.FIFFV_MNE_COORD_4D_HEAD)
def map_meg_loocv_channels(inst, pick_from, pick_to, self_dots=None,
                           cross_dots=None, mode='fast'):
    """Compute a field-mapping matrix from one MEG channel set to another.

    Built on MNE's private field-interpolation machinery. The dot products
    may be passed in so that repeated (e.g. leave-one-out) calls do not
    recompute them.

    Parameters
    ----------
    inst : object with an ``info`` attribute (Raw, Epochs, Evoked)
        Provides the measurement info for both channel sets.
    pick_from : array-like of int
        Indices of the source channels.
    pick_to : array-like of int
        Indices of the target channels to be predicted.
    self_dots : ndarray | None
        Precomputed source-to-source dots; computed here if None.
    cross_dots : ndarray | None
        Precomputed source-to-target dots; computed here if None.
    mode : str
        Quality of the Legendre expansion passed to ``_setup_dots``
        (e.g. 'fast').

    Returns
    -------
    mapping : ndarray
        The mapping matrix from source to target channels.
    self_dots : ndarray
        The (possibly newly computed) self dots, for reuse.
    cross_dots : ndarray
        The (possibly newly computed) cross dots, for reuse.
    """
    from mne.io.pick import pick_info
    from mne.forward._lead_dots import _do_self_dots, _do_cross_dots
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.bem import _check_origin

    info_from = pick_info(inst.info, pick_from, copy=True)
    info_to = pick_info(inst.info, pick_to, copy=True)

    # no need to apply trans because both from and to coils are in device
    # coordinates
    templates = _read_coil_defs(verbose=False)
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    coils_to = _create_meg_coils(info_to['chs'], 'normal',
                                 info_to['dev_head_t'], templates)
    miss = 1e-4  # Smoothing criterion for MEG

    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')
    # fixed sphere-model origin (head frame, meters)
    my_origin = _check_origin((0., 0., 0.04), info_from)
    if self_dots is None:
        self_dots = _do_self_dots(int_rad, False, coils_from, my_origin,
                                  'meg', lut_fun, n_fact, n_jobs=1)
    if cross_dots is None:
        cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to,
                                    my_origin, 'meg', lut_fun, n_fact).T

    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)
    fmd['data'] = _compute_mapping_matrix(fmd, info_from)

    return fmd['data'], self_dots, cross_dots
def _interpolate_bads_meg(inst, mode='accurate', origin=None, verbose=None):
    """Interpolate bad MEG channels from the remaining good channels.

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    mode : str
        Either `'accurate'` or `'fast'`, determines the quality of the
        Legendre polynomial expansion used for interpolation. `'fast'`
        should be sufficient for most applications.
    origin : None | list
        If None, origin is set to sensor center of mass, otherwise use the
        coordinates provided as origin. The old standard value is
        (0., 0., 0.04).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see
        :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
        for more).
    """
    from mne.channels.interpolation import _do_interp_dots

    all_meg = pick_types(inst.info, meg=True, eeg=False, exclude=[])
    good_meg = pick_types(inst.info, meg=True, eeg=False, exclude='bads')
    meg_names = [inst.info['ch_names'][idx] for idx in all_meg]
    bad_names = [name for name in inst.info['bads'] if name in meg_names]

    # indices of the bad MEG channels to interpolate
    if bad_names:
        bad_meg = pick_channels(inst.info['ch_names'], bad_names, exclude=[])
    else:
        bad_meg = []

    # nothing to do when there are no MEG channels or no bad ones
    if len(all_meg) == 0 or len(bad_meg) == 0:
        return

    info_from = pick_info(inst.info, good_meg)
    info_to = pick_info(inst.info, bad_meg)

    # the private mapping helper was renamed in MNE 0.21
    if check_version('mne', min_version='0.21'):
        from mne.forward import _map_meg_or_eeg_channels
        mapping = _map_meg_or_eeg_channels(info_from, info_to, mode=mode,
                                           origin=origin)
    else:
        from mne.forward import _map_meg_channels
        mapping = _map_meg_channels(info_from, info_to, mode=mode,
                                    origin=origin)
    _do_interp_dots(inst, mapping, good_meg, bad_meg)
def test_plot_tfr_topomap():
    """Test plotting of TFR data."""
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    raw = read_raw_fif(raw_fname)
    times = np.linspace(-0.1, 0.1, 200)
    res = 8
    n_freqs = 3
    nave = 1
    rng = np.random.RandomState(42)
    # hand-picked channel indices into raw.info
    picks = [93, 94, 96, 97, 21, 22, 24, 25, 129, 130, 315, 316, 2, 5, 8, 11]
    info = pick_info(raw.info, picks)
    # random data shaped (n_channels, n_freqs, n_times)
    data = rng.randn(len(picks), n_freqs, len(times))
    tfr = AverageTFR(info, data, times, np.arange(n_freqs), nave)
    tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
                     res=res, contours=0)

    # simulate a rectangle selection on the topomap via synthetic events
    eclick = mpl.backend_bases.MouseEvent('button_press_event',
                                          plt.gcf().canvas, 0, 0, 1)
    eclick.xdata = eclick.ydata = 0.1
    eclick.inaxes = plt.gca()
    erelease = mpl.backend_bases.MouseEvent('button_release_event',
                                            plt.gcf().canvas, 0.9, 0.9, 1)
    erelease.xdata = 0.3
    erelease.ydata = 0.2
    pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
    _onselect(eclick, erelease, tfr, pos, 'grad', 1, 3, 1, 3, 'RdBu_r',
              list())
    _onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r',
              list())
    eclick.xdata = eclick.ydata = 0.
    erelease.xdata = erelease.ydata = 0.9
    tfr._onselect(eclick, erelease, None, 'mean', None)
    plt.close('all')

    # test plot_psds_topomap
    info = raw.info.copy()
    chan_inds = channel_indices_by_type(info)
    info = pick_info(info, chan_inds['grad'][:4])
    fig, axes = plt.subplots()
    freqs = np.arange(3., 9.5)
    bands = [(4, 8, 'Theta')]
    psd = np.random.rand(len(info['ch_names']), freqs.shape[0])
    plot_psds_topomap(psd, freqs, info, bands=bands, axes=[axes])
def fit(self, inst):
    '''
    Fit Peakachu to erp data. This leads to selection of channels that
    maximize peak strength. These channels are then used during `transform`
    to search for peak. Different data can be passed during `fit` and
    `transform` - for example `fit` could use condition-average while
    transform can be used on separate conditions.
    '''
    from mne.evoked import Evoked
    from mne.io.pick import _pick_data_channels, pick_info

    # NOTE(review): an np.ndarray passed here has no .info / .ch_names,
    # so presumably only Evoked is fully supported — confirm with callers.
    assert isinstance(inst, (Evoked, np.ndarray)), 'inst must be either' \
        ' Evoked or numpy array, got {}.'.format(type(inst))

    # deal with bad channels and non-data channels
    picks = _pick_data_channels(inst.info)
    self._info = pick_info(inst.info, picks)
    self._all_ch_names = [inst.ch_names[i] for i in picks]

    # get peaks
    peak_val, peak_ind = self._get_peaks(inst, select=picks)

    # select n_channels; negate values so that 'min' peaks rank highest
    vals = peak_val if 'max' in self.select else -peak_val
    chan_ind = select_channels(vals, N=self.n_channels,
                               connectivity=self.connectivity)
    # map channel indices back into the original (unpicked) channel space
    self._chan_ind = [picks[i] for i in chan_ind]
    self._chan_names = [inst.ch_names[ch] for ch in self._chan_ind]
    self._peak_vals = peak_val
    return self
def pre_whiten(data, info, picks, pre_whitener=None):
    """Standardize data per channel type (aux of ica._pre_whiten, mne v0.15).

    Parameters
    ----------
    data : ndarray, shape (n_picked_channels, n_times)
        The data to scale; divided in place.
    info : instance of mne.Info
        Measurement info for the full recording; picked internally.
    picks : array-like of int
        Indices into ``info`` corresponding to the rows of ``data``.
    pre_whitener : ndarray, shape (n_picked_channels, 1) | None
        Per-channel scaling factors. If None, computed as the per-channel
        standard deviation within each supported channel type.

    Returns
    -------
    data : ndarray
        The scaled data (same array, divided in place).
    pre_whitener : ndarray
        The scaling factors that were applied.
    """
    if pre_whitener is None:
        # use standardization as whitener
        # Scale (z-score) the data by channel type
        info = pick_info(info, picks)
        pre_whitener = np.empty([len(data), 1])
        for ch_type in _DATA_CH_TYPES_SPLIT + ['eog']:
            if _contains_ch_type(info, ch_type):
                if ch_type == 'seeg':
                    this_picks = pick_types(info, meg=False, seeg=True)
                elif ch_type == 'ecog':
                    this_picks = pick_types(info, meg=False, ecog=True)
                elif ch_type == 'eeg':
                    this_picks = pick_types(info, meg=False, eeg=True)
                elif ch_type in ('mag', 'grad'):
                    this_picks = pick_types(info, meg=ch_type)
                elif ch_type == 'eog':
                    this_picks = pick_types(info, meg=False, eog=True)
                elif ch_type in ('hbo', 'hbr'):
                    this_picks = pick_types(info, meg=False, fnirs=ch_type)
                else:
                    # fixed: the two literals used to concatenate without a
                    # separating space ("reached.Unsupported")
                    raise RuntimeError('Should not be reached. '
                                       'Unsupported channel {0}'
                                       .format(ch_type))
                pre_whitener[this_picks] = np.std(data[this_picks],
                                                  axis=1)[:, None]
    data /= pre_whitener
    return data, pre_whitener
def get_data_as_epoch(self, n_samples=1024, picks=None):
    """Return last n_samples from current time.

    Parameters
    ----------
    n_samples : int
        Number of samples to fetch.
    %(picks_all)s

    Returns
    -------
    epoch : instance of Epochs
        The samples fetched as an Epochs object.

    See Also
    --------
    mne.Epochs.iter_evoked
    """
    # set up timeout in case LSL process hang. wait arb 5x expected time
    timeout = 5. * n_samples / self.info['sfreq']
    samples, _ = self.client.pull_chunk(max_samples=n_samples,
                                        timeout=timeout)
    data = np.vstack(samples).T

    # a single event marking the start of the fetched chunk
    events = np.array([[0, 1, 1]])

    sel = _picks_to_idx(self.info, picks, 'all', exclude=())
    picked_info = pick_info(self.info, sel)
    return EpochsArray(data[sel][np.newaxis], picked_info, events)
def plot_coregistration(subject, subjects_dir, hcp_path, recordings_path,
                        info_from=(('data_type', 'rest'), ('run_index', 0)),
                        view_init=(('azim', 0), ('elev', 0))):
    """A diagnostic plot to show the HCP coregistration

    Parameters
    ----------
    subject : str
        The subject
    subjects_dir : str
        The path corresponding to MNE/freesurfer SUBJECTS_DIR (to be created)
    hcp_path : str
        The path where the HCP files can be found.
    recordings_path : str
        The path to converted data (including the head<->device transform).
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read. Must not be empty room as sensor positions are in
        head coordinates for 4D systems, hence not available in that case.
        Note that differences between the sensor positions across runs
        are smaller than 12 digits, hence negligible.
    view_init : tuple of tuples | dict
        The initival view, defaults to azimuth and elevation of 0,
        a simple lateral view

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa

    # accept either tuple-of-tuples or dict for both keyword arguments
    if isinstance(info_from, tuple):
        info_from = dict(info_from)
    if isinstance(view_init, tuple):
        view_init = dict(view_init)

    head_mri_t = read_trans(
        op.join(recordings_path, subject,
                '{}-head_mri-trans.fif'.format(subject)))

    info = read_info(subject=subject, hcp_path=hcp_path, **info_from)

    info = pick_info(info, _pick_data_channels(info, with_ref_meg=False))
    sens_pnts = np.array([c['loc'][:3] for c in info['chs']])
    # map sensor positions from head to MRI coordinates
    sens_pnts = apply_trans(head_mri_t, sens_pnts)
    sens_pnts *= 1e3  # put in mm scale

    # inner-skull surface from the subject's BEM directory
    pnts, tris = read_surface(
        op.join(subjects_dir, subject, 'bem', 'inner_skull.surf'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*sens_pnts.T, color='purple', marker='o')
    ax.scatter(*pnts.T, color='green', alpha=0.3)
    ax.view_init(**view_init)
    fig.tight_layout()
    return fig
def get_pca_mean_and_pre_whitener_raw(raw, picks, start, stop, decim, reject,
                                      flat, tstep, pre_whitener,
                                      reject_by_annotation):
    """Aux method based on ica._fit_raw from mne v0.15

    Extracts (optionally decimated / artifact-rejected) data from ``raw``,
    whitens it with ``pre_whiten`` and returns the per-channel mean plus
    the whitener used.
    """
    if picks is None:  # just use good data channels
        picks = _pick_data_channels(raw.info, exclude='bads',
                                    with_ref_meg=False)

    info = pick_info(raw.info, picks)
    # drop CTF compensation info for the picked subset
    if info['comps']:
        info['comps'] = []

    start, stop = _check_start_stop(raw, start, stop)
    reject_by_annotation = 'omit' if reject_by_annotation else None
    # this will be a copy
    data = raw.get_data(picks, start, stop, reject_by_annotation)
    # this will be a view
    if decim is not None:
        data = data[:, ::decim]
    # this will make a copy
    if (reject is not None) or (flat is not None):
        data, drop_inds_ = _reject_data_segments(data, reject, flat, decim,
                                                 info, tstep)
    # this may operate inplace or make a copy
    data, pre_whitener = pre_whiten(data, raw.info, picks, pre_whitener)

    pca_mean_ = np.mean(data, axis=1)

    return pca_mean_, pre_whitener
def get_pca_mean_and_pre_whitener_epochs(epochs, picks, decim, pre_whitener):
    """Aux method based on ica._fit_epochs from mne v0.15

    Concatenates (optionally decimated) epochs in time, whitens the result
    with ``pre_whiten`` and returns the per-channel mean plus the whitener
    used.
    """
    if picks is None:
        picks = _pick_data_channels(epochs.info, exclude='bads',
                                    with_ref_meg=False)

    # filter out all the channels the raw wouldn't have initialized
    info = pick_info(epochs.info, picks)
    # drop CTF compensation info for the picked subset
    if info['comps']:
        info['comps'] = []

    # this should be a copy (picks a list of int)
    data = epochs.get_data()[:, picks]
    # this will be a view
    if decim is not None:
        data = data[:, :, ::decim]

    # This will make at least one copy (one from hstack,
    # maybe one more from _pre_whiten)
    data, pre_whitener = pre_whiten(np.hstack(data), epochs.info, picks,
                                    pre_whitener)

    pca_mean_ = np.mean(data, axis=1)
    return pca_mean_, pre_whitener
def test_compute_whitener_rank():
    """Test risky rank options."""
    info = read_info(ave_fname)
    info = pick_info(info, pick_types(info, meg=True))
    with info._unlock():
        info['projs'] = []
    # need a square version because the diag one takes shortcuts in
    # compute_whitener (users shouldn't even need this function so it's
    # private)
    cov = make_ad_hoc_cov(info)._as_square()
    assert len(cov['names']) == 306
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 306
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    cov['data'][-1] *= 1e-14  # trivially rank-deficient
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 305
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    # asking for a rank larger than the estimate should emit a warning
    with pytest.warns(RuntimeWarning, match='exceeds the estimated'):
        _, _, rank = compute_whitener(cov, info, rank=dict(meg=306),
                                      return_rank=True)
    assert rank == 306
def test_plot_alignment_meg(renderer, system):
    """Test plotting of MEG sensors + helmet."""
    # load info for the requested acquisition system
    if system == 'Neuromag':
        this_info = read_info(evoked_fname)
    elif system == 'CTF':
        this_info = read_raw_ctf(ctf_fname).info
    elif system == 'BTi':
        this_info = read_raw_bti(pdf_fname, config_fname, hs_fname,
                                 convert=True, preload=False).info
    else:
        assert system == 'KIT'
        this_info = read_raw_kit(sqd_fname).info

    meg = ['helmet', 'sensors']
    # KIT also exercises reference-sensor plotting
    if system == 'KIT':
        meg.append('ref')
    fig = plot_alignment(this_info, read_trans(trans_fname),
                         subject='sample',
                         subjects_dir=subjects_dir, meg=meg, eeg=False)
    # count the number of objects: should be n_meg_ch + 1 (helmet) + 1 (head)
    use_info = pick_info(this_info, pick_types(
        this_info, meg=True, eeg=False, ref_meg='ref' in meg, exclude=()))
    n_actors = use_info['nchan'] + 2
    _assert_n_actors(fig, renderer, n_actors)
def test_plot_projs_topomap():
    """Test plot_projs_topomap."""
    projs = read_proj(ecg_fname)
    info = read_info(raw_fname)
    # low-res, no contours/sensors keeps the test fast
    fast_test = {"res": 8, "contours": 0, "sensors": False}
    plot_projs_topomap(projs, info=info, colorbar=True, **fast_test)
    plt.close('all')
    ax = plt.subplot(111)
    projs[3].plot_topomap(info)
    plot_projs_topomap(projs[:1], info, axes=ax, **fast_test)  # test axes
    plt.close('all')
    triux_info = read_info(triux_fname)
    plot_projs_topomap(triux_info['projs'][-1:], triux_info, **fast_test)
    plt.close('all')
    plot_projs_topomap(triux_info['projs'][:1], triux_info, **fast_test)
    plt.close('all')
    eeg_avg = make_eeg_average_ref_proj(info)
    eeg_avg.plot_topomap(info, **fast_test)
    plt.close('all')
    # test vlims
    for vlim in ('joint', (-1, 1), (None, 0.5), (0.5, None), (None, None)):
        plot_projs_topomap(projs[:-1], info, vlim=vlim, colorbar=True)
    plt.close('all')

    # an EEG projector with MEG-only info must raise
    eeg_proj = make_eeg_average_ref_proj(info)
    info_meg = pick_info(info, pick_types(info, meg=True, eeg=False))
    with pytest.raises(ValueError, match='No channel names in info match p'):
        plot_projs_topomap([eeg_proj], info_meg)
def get_data_as_epoch(self, n_samples=1024, picks=None):
    """Return last n_samples from current time.

    Parameters
    ----------
    n_samples : int
        Number of samples to fetch.
    %(picks_all)s

    Returns
    -------
    epoch : instance of Epochs
        The samples fetched as an Epochs object.

    See Also
    --------
    mne.Epochs.iter_evoked
    """
    header = self.ft_client.getHeader()
    stop = header.nSamples - 1          # most recent available sample
    start = stop - n_samples + 1

    # a single event marking where the chunk begins
    events = np.array([[start, 1, 1]])

    # fetch the data and transpose to (n_channels, n_samples)
    data = self.ft_client.getData([start, stop]).transpose()

    # create epoch from data
    sel = _picks_to_idx(self.info, picks, 'all', exclude=())
    sub_info = pick_info(self.info, sel)
    return EpochsArray(data[sel][np.newaxis], sub_info, events)
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the first step of the FASTER algorithm.

    This function attempts to automatically mark bad EEG channels by
    performing outlier detection. It operated on epoched data, to make sure
    only relevant data is analyzed.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """
    from scipy.stats import kurtosis
    # per-metric score functions; each maps (n_channels, n_times) -> scores
    metrics = {
        'variance': lambda x: np.var(x, axis=1),
        'correlation': lambda x: np.mean(
            np.ma.masked_array(np.corrcoef(x),
                               np.identity(len(x), dtype=bool)), axis=0),
        'hurst': lambda x: _hurst(x),
        'kurtosis': lambda x: kurtosis(x, axis=1),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    # Concatenate epochs in time
    data = epochs.get_data()[:, picks]
    data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)

    # Find bad channels
    bads = defaultdict(list)
    info = pick_info(epochs.info, picks, copy=True)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad channel detection on %s channels:' %
                    ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[chs])
            bad_channels = [
                epochs.ch_names[picks[chs[i]]]
                for i in find_outliers(scores, thresh, max_iter)
            ]
            logger.info('\tBad by %s: %s' % (metric, bad_channels))
            bads[metric].append(bad_channels)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
def _fast_map_meg_channels(inst, pick_from, pick_to, mode='fast'):
    """Compute a MEG channel mapping matrix with cached all-to-all dots.

    The expensive all-to-all dot products are computed once over ALL
    channels of ``inst`` (with ``bads`` cleared so the cache key is stable)
    and cached via ``mem.cache``; the requested sub-matrices are then
    sliced out with ``pick_from`` / ``pick_to``.
    """
    from mne.io.pick import pick_info
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.forward._lead_dots import _do_self_dots, _do_cross_dots
    from mne.bem import _check_origin

    miss = 1e-4  # Smoothing criterion for MEG

    def _compute_dots(info, mode='fast'):
        """Compute all-to-all dots.

        NOTE(review): this closes over the outer ``info_from`` (defined
        below, before the cached call) rather than using its own ``info``
        parameter for ``_check_origin`` — presumably equivalent for a fixed
        explicit origin, but confirm before refactoring.
        """
        templates = _read_coil_defs()
        coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'],
                                  templates)
        my_origin = _check_origin((0., 0., 0.04), info_from)
        int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils, 'meg')
        self_dots = _do_self_dots(int_rad, False, coils, my_origin, 'meg',
                                  lut_fun, n_fact, n_jobs=1)
        cross_dots = _do_cross_dots(int_rad, False, coils, coils,
                                    my_origin, 'meg', lut_fun, n_fact).T
        return self_dots, cross_dots

    _compute_fast_dots = mem.cache(_compute_dots)
    info = inst.info.copy()
    info['bads'] = []  # if bads is different, hash will be different

    info_from = pick_info(info, pick_from, copy=True)
    templates = _read_coil_defs()
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    my_origin = _check_origin((0., 0., 0.04), info_from)
    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')

    # compute (or load from cache) dots over all channels, then slice out
    # the requested from/to sub-matrices
    self_dots, cross_dots = _compute_fast_dots(info, mode=mode)

    cross_dots = cross_dots[pick_to, :][:, pick_from]
    self_dots = self_dots[pick_from, :][:, pick_from]

    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)

    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
    return fmd['data']
def test_info_no_rename_no_reorder_no_pdf():
    """ Test private renaming, reordering and partial construction option """
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        with warnings.catch_warnings(record=True):  # weight tables
            info, bti_info = _get_bti_info(
                pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
                rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
                ecg_ch='E31', eog_ch=('E63', 'E64'),
                rename_channels=False, sort_by_ch_name=False)
            # info2 is built without the pdf -> acquisition metadata missing
            info2, bti_info = _get_bti_info(
                pdf_fname=None, config_fname=config, head_shape_fname=hs,
                rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
                ecg_ch='E31', eog_ch=('E63', 'E64'),
                rename_channels=False, sort_by_ch_name=False)
        assert_equal(info['ch_names'],
                     [ch['ch_name'] for ch in info['chs']])
        # with sort_by_ch_name=False the original 4D channel order is kept
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
                     ['A22', 'A2', 'A104', 'A241', 'A138'])
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
                     ['A133', 'A158', 'A44', 'A134', 'A216'])
        info = pick_info(info,
                         pick_types(info, meg=True, stim=True, resp=True))
        info2 = pick_info(info2,
                          pick_types(info2, meg=True, stim=True, resp=True))
        assert_true(info['sfreq'] is not None)
        assert_true(info['lowpass'] is not None)
        assert_true(info['highpass'] is not None)
        assert_true(info['meas_date'] is not None)
        # without the pdf these acquisition parameters cannot be known
        assert_equal(info2['sfreq'], None)
        assert_equal(info2['lowpass'], None)
        assert_equal(info2['highpass'], None)
        assert_equal(info2['meas_date'], None)
        # fixed: this assertion used to appear twice in a row
        assert_equal(info['ch_names'], info2['ch_names'])
        for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
            assert_array_equal(info[key]['trans'], info2[key]['trans'])
        assert_array_equal(
            np.array([ch['loc'] for ch in info['chs']]),
            np.array([ch['loc'] for ch in info2['chs']]))
def _handle_overlaps(info, t, sphere, estimates):
    """Prepare for topomap including merging channels"""
    from mne.viz.topomap import _prepare_topomap_plot

    picks = _picks_to_idx(info, t, exclude=[], allow_empty=True)
    sub_info = pick_info(info, picks)
    (_, pos, merge_channels, ch_names, ch_type, sphere,
     clip_origin) = _prepare_topomap_plot(sub_info, t, sphere=sphere)
    # merge pair-wise channel data (e.g. gradiometers) for plotting
    estmrg, ch_names = _merge_ch_data(estimates.copy()[picks], t, ch_names)
    return estmrg, pos, ch_names, sphere
def test_plot_topomap_bads_grad():
    """Test plotting topomap with bad gradiometer channels (gh-8802)."""
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    data = rng.randn(203)
    info = read_info(evoked_fname)
    info['bads'] = ['MEG 2242']
    # keep only gradiometers; the bad one stays in the picked info
    info = pick_info(info, pick_types(info, meg='grad'))
    assert len(info['chs']) == 203
    plot_topomap(data, info, res=8)
    plt.close('all')
def pick(self, picks, exclude=()):
    """Pick a subset of channels.

    Parameters
    ----------
    %(picks_all)s
    exclude : list | str
        Set of channels to exclude, only used when picking based on
        types (e.g., exclude="bads" when picks="meg").

    Returns
    -------
    inst : instance of ResultsGLM
        The modified instance.
    """
    sel = _picks_to_idx(self.info, picks, 'all', exclude,
                        allow_empty=False)
    # restrict self.info in place, then keep only matching data entries
    pick_info(self.info, sel, copy=False)
    self._data = {name: self._data[name] for name in self.info.ch_names}
    return self
def epochs_compute_cnv(epochs, tmin=None, tmax=None):
    """Compute contingent negative variation (CNV)

    Fits, for every epoch and channel, a straight line over the selected
    time window; the slope is the CNV estimate.

    Parameters
    ----------
    epochs : instance of Epochs
        The input data.
    tmin : float | None
        The first time point to include, if None, all samples from the first
        sample of the epoch will be used. Defaults to None.
    tmax : float | None
        The last time point to include, if None, all samples up to the last
        sample of the epoch will be used. Defaults to None.

    Returns
    -------
    slopes : ndarray of float (n_epochs, n_channels)
        The regression slopes (betas) representing contingent negative
        variation.
    intercepts : ndarray of float (n_epochs, n_channels)
        The regression intercepts.
    """
    picks = mne.pick_types(epochs.info, meg=True, eeg=True)

    n_epochs = len(epochs.events)
    n_channels = len(picks)
    # we reduce over time samples
    slopes = np.zeros((n_epochs, n_channels))
    intercepts = np.zeros((n_epochs, n_channels))
    if tmax is None:
        tmax = epochs.times[-1]
    if tmin is None:
        tmin = epochs.times[0]
    fit_range = np.where(_time_mask(epochs.times, tmin, tmax))[0]
    # design: intercept + increasing time
    design_matrix = np.c_[np.ones(len(fit_range)),
                          epochs.times[fit_range] - tmin]

    # estimate single trial regression over time samples;
    # scale channels by type so units are comparable across channel types
    scales = np.zeros(n_channels)
    info_ = pick_info(epochs.info, picks)
    for this_type, this_picks in _picks_by_type(info_):
        scales[this_picks] = _handle_default('scalings')[this_type]

    for ii, epoch in enumerate(epochs):
        y = epoch[picks][:, fit_range].T  # time is samples
        betas, _, _, _ = linalg.lstsq(a=design_matrix, b=y * scales)
        intercepts[ii] = betas[0]
        slopes[ii] = betas[1]

    return slopes, intercepts
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the first step of the FASTER algorithm.

    Attempts to automatically mark bad EEG channels by outlier
    detection on per-channel summary statistics. Operates on epoched
    data so that only relevant data is analyzed.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
        Defaults to all of them.
    thresh : float
        Threshold, in standard deviations, above which a channel is
        marked as bad. Defaults to 3.
    max_iter : int
        Maximum number of outlier-detection iterations (defaults to 1,
        as in the original FASTER paper).
    """
    from scipy.stats import kurtosis

    def _mean_correlation(x):
        # mean correlation with all other channels, self excluded
        self_mask = np.identity(len(x), dtype=bool)
        return np.mean(np.ma.masked_array(np.corrcoef(x), self_mask), axis=0)

    metrics = {
        'variance': lambda x: np.var(x, axis=1),
        'correlation': _mean_correlation,
        'hurst': lambda x: _hurst(x),
        'kurtosis': lambda x: kurtosis(x, axis=1),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }
    if use_metrics is None:
        use_metrics = metrics.keys()

    # Concatenate epochs in time: (n_channels, n_epochs * n_times)
    data = epochs.get_data()[:, picks]
    data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)

    # Score each channel type separately and collect names of outliers.
    found = defaultdict(list)
    info = pick_info(epochs.info, picks, copy=True)
    for ch_type, type_picks in _picks_by_type(info):
        logger.info('Bad channel detection on %s channels:' %
                    ch_type.upper())
        for name in use_metrics:
            scores = metrics[name](data[type_picks])
            outliers = find_outliers(scores, thresh, max_iter)
            bad_channels = [epochs.ch_names[picks[type_picks[i]]]
                            for i in outliers]
            logger.info('\tBad by %s: %s' % (name, bad_channels))
            found[name].append(bad_channels)

    return dict((name, np.concatenate(hits).tolist())
                for name, hits in found.items())
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    Attempts to automatically mark bad channels in each epoch by
    performing outlier detection.

    NOTE(review): a near-identical definition of this function appears
    again later in this file and will shadow this one at import time —
    confirm which copy is intended and remove the other.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier
        detection (defaults to 1, as in the original FASTER paper).
    """
    # Each metric maps (n_epochs, n_channels, n_times) -> per-epoch,
    # per-channel scores of shape (n_epochs, n_channels).
    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }
    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    # boolean mask (n_epochs, n_picks) per metric; True == bad
    bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
                m in metrics)
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:'
                        % ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            # outlier detection is run independently for every epoch
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s' % (
                        i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True
    return bads
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    Attempts to automatically mark bad channels in each epoch by
    performing outlier detection.

    NOTE(review): this re-defines a function of the same name declared
    earlier in this file (this later copy wins at import time) — confirm
    which copy is intended and remove the other.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier
        detection (defaults to 1, as in the original FASTER paper).
    """
    # Each metric maps (n_epochs, n_channels, n_times) -> per-epoch,
    # per-channel scores of shape (n_epochs, n_channels).
    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }
    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    # boolean mask (n_epochs, n_picks) per metric; True == bad
    bads = dict(
        (m, np.zeros((len(data), len(picks)), dtype=bool)) for m in metrics)
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:'
                        % ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            # outlier detection is run independently for every epoch
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s'
                                % (i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True
    return bads
def plot_components(A, raw, picks, **kwargs):
    """Plot the columns of mixing matrix ``A`` as component topographies.

    A dummy ``ICA_`` instance is populated with ``A`` so that MNE's
    ``ICA.plot_components`` machinery can be reused for plotting an
    externally estimated decomposition.
    """
    n_chan, n_comp = A.shape
    dummy = ICA_(n_components=n_comp, method='fastica', random_state=0,
                 fit_params=dict(max_iter=1))
    dummy.info = pick_info(raw.info, picks)
    # empty the 'comps' entry if present so the dummy carries none
    if dummy.info['comps']:
        dummy.info['comps'] = []
    dummy.ch_names = dummy.info['ch_names']
    dummy.n_components_ = n_chan
    dummy.mixing_matrix_ = A
    dummy.unmixing_matrix_ = np.linalg.pinv(A)
    # identity PCA: components map straight through to channels
    dummy.pca_components_ = np.eye(n_chan)
    dummy.pca_mean_ = None
    dummy._update_ica_names()
    dummy.plot_components(**kwargs)
def _pre_whiten(self, data, info, picks):
    """Aux function: whiten (or re-whiten) ``data`` before ICA fitting.

    If no whitener has been computed yet, one is built either from
    per-channel-type standard deviations (when ``self.noise_cov`` is
    None) or from the noise covariance; otherwise the stored
    ``self.pre_whitener_`` is re-applied.

    Returns the whitened data and the whitener that was used.
    """
    has_pre_whitener = hasattr(self, 'pre_whitener_')
    if not has_pre_whitener and self.noise_cov is None:
        # use standardization as whitener
        # Scale (z-score) the data by channel
        info = pick_info(info, picks)
        pre_whitener = np.empty([len(data), 1])
        for ch_type in _DATA_CH_TYPES_SPLIT + ('eog', "ref_meg"):
            if _contains_ch_type(info, ch_type):
                # map each channel type to its pick_types selector
                if ch_type == 'seeg':
                    this_picks = pick_types(info, meg=False, seeg=True)
                elif ch_type == 'ecog':
                    this_picks = pick_types(info, meg=False, ecog=True)
                elif ch_type == 'eeg':
                    this_picks = pick_types(info, meg=False, eeg=True)
                elif ch_type in ('mag', 'grad'):
                    this_picks = pick_types(info, meg=ch_type)
                elif ch_type == 'eog':
                    this_picks = pick_types(info, meg=False, eog=True)
                elif ch_type in ('hbo', 'hbr'):
                    this_picks = pick_types(info, meg=False, fnirs=ch_type)
                elif ch_type == 'ref_meg':
                    this_picks = pick_types(info, meg=False, ref_meg=True)
                else:
                    raise RuntimeError(
                        'Should not be reached.'
                        'Unsupported channel {}'.format(ch_type))
                # per-channel std over time becomes the scaling factor
                pre_whitener[this_picks] = np.std(data[this_picks],
                                                  axis=1)[:, None]
        data /= pre_whitener
    elif not has_pre_whitener and self.noise_cov is not None:
        # whiten using the user-supplied noise covariance
        from mne.cov import compute_whitener
        pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)
        assert data.shape[0] == pre_whitener.shape[1]
        data = np.dot(pre_whitener, data)
    elif has_pre_whitener and self.noise_cov is None:
        # reuse the previously computed per-channel scaling
        data /= self.pre_whitener_
        pre_whitener = self.pre_whitener_
    else:
        # reuse the previously computed covariance-based whitener
        data = np.dot(self.pre_whitener_, data)
        pre_whitener = self.pre_whitener_
    return data, pre_whitener
def _fit_evoked(self, raw, picks, start, stop, decim, reject, flat, tstep,
                verbose):
    """Aux method: fit ICA on the data array of ``raw``.

    Parameters mirror ``ICA.fit``; ``raw`` supplies the samples via
    ``raw.data``. Returns ``self`` so calls can be chained.

    Fix: removed the Python-2-only statement ``print data.shape``, which
    is a SyntaxError under Python 3 (and was stray debug output).
    """
    if self.current_fit != 'unfitted':
        self._reset()

    if picks is None:  # just use good data channels
        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                           ecg=False, misc=False, stim=False,
                           ref_meg=False, exclude='bads')

    logger.info('Fitting ICA to data using %i channels. \n'
                'Please be patient, this may take some time' % len(picks))

    if self.max_pca_components is None:
        self.max_pca_components = len(picks)
        logger.info('Inferring max_pca_components from picks.')

    self.info = pick_info(raw.info, picks)
    # drop any stored compensation entries for the reduced info
    if self.info['comps']:
        self.info['comps'] = []
    self.ch_names = self.info['ch_names']

    start, stop = _check_start_stop(raw, start, stop)
    data = raw.data[picks, start:stop]
    if decim is not None:
        data = data[:, ::decim].copy()

    if (reject is not None) or (flat is not None):
        data, self.drop_inds_ = _reject_data_segments(
            data, reject, flat, decim, self.info, tstep)

    self.n_samples_ = data.shape[1]

    data, self._pre_whitener = self._pre_whiten(data, raw.info, picks)

    self._fit(data, self.max_pca_components, 'evoked')

    return self
def _prep_eeg_channels(info, exclude=(), verbose=None):
    """Prepare EEG electrode definitions for forward calculation.

    Parameters
    ----------
    info : instance of Info
        The measurement information dictionary
    exclude : list of str | str
        List of channels to exclude. If 'bads', exclude channels in
        info['bads']
    verbose : bool, str, int, or None
        If not None, override default verbose level (see
        :func:`mne.verbose` and :ref:`Logging documentation
        <tut_logging>` for more).

    Returns
    -------
    eegels : list of dict
        Information for each prepped EEG electrode
    eegnames : list of str
        Name of each prepped EEG electrode
    """
    info_extra = 'info'

    # Locate EEG data channels (reference MEG channels excluded);
    # there must be at least one.
    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
                       exclude=exclude)
    if len(picks) <= 0:
        raise RuntimeError('Could not find any EEG channels')

    # Channel structs and names for the selected electrodes.
    eegchs = pick_info(info, picks)['chs']
    eegnames = [info['ch_names'][idx] for idx in picks]
    logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra))

    # Build the electrode coil definitions.
    eegels = _create_eeg_els(eegchs)
    logger.info('Head coordinate coil definitions created.')

    return eegels, eegnames
def transfer_to_mne(A, raw, picks):
    '''
    Hack to use the MNE ICA class providing the estimated mixing matrix A.
    '''
    n_chan, n_comp = A.shape
    ica = ICA_(n_components=n_comp, method='fastica', random_state=0,
               fit_params=dict(max_iter=1))
    ica.info = pick_info(raw.info, picks)
    # empty the 'comps' entry if present so the dummy carries none
    if ica.info['comps']:
        ica.info['comps'] = []
    ica.ch_names = ica.info['ch_names']
    ica.n_components_ = n_chan
    ica.mixing_matrix_ = A
    ica.unmixing_matrix_ = np.linalg.pinv(A)
    # identity PCA: components map straight through to channels
    ica.pca_components_ = np.eye(n_chan)
    ica.pca_mean_ = None
    ica._update_ica_names()
    return ica
def _find_bad_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the second step of the FASTER algorithm.

    Attempts to automatically mark bad epochs by outlier detection on
    per-epoch summary statistics.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation'. Defaults to all of them.
    thresh : float
        Threshold, in standard deviations, above which an epoch is
        marked as bad. Defaults to 3.
    max_iter : int
        Maximum number of outlier-detection iterations (defaults to 1,
        as in the original FASTER paper).
    """
    # Each metric reduces (n_epochs, n_channels, n_times) to one score
    # per epoch by averaging a per-channel statistic over channels.
    metrics = {
        'amplitude': lambda x: np.mean(np.ptp(x, axis=2), axis=1),
        'deviation': lambda x: np.mean(_deviation(x), axis=1),
        'variance': lambda x: np.mean(np.var(x, axis=2), axis=1),
    }
    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    found = defaultdict(list)
    for ch_type, type_picks in _picks_by_type(info):
        logger.info('Bad epoch detection on %s channels:' % ch_type.upper())
        for name in use_metrics:
            scores = metrics[name](data[:, type_picks])
            outliers = find_outliers(scores, thresh, max_iter)
            logger.info('\tBad by %s: %s' % (name, outliers))
            found[name].append(outliers)
    return dict((name, np.concatenate(hits).tolist())
                for name, hits in found.items())
def _fit_evoked(self, raw, picks, start, stop, decim, reject, flat, tstep,
                verbose):
    """Aux method: fit ICA on the data array of ``raw``.

    Parameters mirror ``ICA.fit``; ``raw`` supplies the samples via
    ``raw.data``. Returns ``self`` so calls can be chained.

    Fix: removed the Python-2-only statement ``print data.shape``, which
    is a SyntaxError under Python 3 (and was stray debug output).
    NOTE(review): a near-identical definition of this method appears
    earlier in this file — confirm which copy is intended.
    """
    if self.current_fit != 'unfitted':
        self._reset()

    if picks is None:  # just use good data channels
        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                           ecg=False, misc=False, stim=False,
                           ref_meg=False, exclude='bads')

    logger.info('Fitting ICA to data using %i channels. \n'
                'Please be patient, this may take some time' % len(picks))

    if self.max_pca_components is None:
        self.max_pca_components = len(picks)
        logger.info('Inferring max_pca_components from picks.')

    self.info = pick_info(raw.info, picks)
    # drop any stored compensation entries for the reduced info
    if self.info['comps']:
        self.info['comps'] = []
    self.ch_names = self.info['ch_names']

    start, stop = _check_start_stop(raw, start, stop)
    data = raw.data[picks, start:stop]
    if decim is not None:
        data = data[:, ::decim].copy()

    if (reject is not None) or (flat is not None):
        data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
                                                      decim, self.info,
                                                      tstep)

    self.n_samples_ = data.shape[1]

    data, self._pre_whitener = self._pre_whiten(data, raw.info, picks)

    self._fit(data, self.max_pca_components, 'evoked')

    return self
def test_info_no_rename_no_reorder_no_pdf():
    """ Test private renaming, reordering and partial construction option """
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        with warnings.catch_warnings(record=True):  # weight tables
            # full info (with pdf) vs. partial info (pdf_fname=None)
            info, bti_info = _get_bti_info(
                pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
                rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
                ecg_ch="E31", eog_ch=("E63", "E64"),
                rename_channels=False, sort_by_ch_name=False,
            )
            info2, bti_info = _get_bti_info(
                pdf_fname=None, config_fname=config, head_shape_fname=hs,
                rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
                ecg_ch="E31", eog_ch=("E63", "E64"),
                rename_channels=False, sort_by_ch_name=False,
            )
        assert_equal(info["ch_names"],
                     [ch["ch_name"] for ch in info["chs"]])
        # with sort_by_ch_name=False the original (unsorted) order is kept
        assert_equal([n for n in info["ch_names"] if n.startswith("A")][:5],
                     ["A22", "A2", "A104", "A241", "A138"])
        assert_equal([n for n in info["ch_names"] if n.startswith("A")][-5:],
                     ["A133", "A158", "A44", "A134", "A216"])

        # make sure the first (full) info is consistent with the second
        # (partial, no pdf) info
        info = pick_info(info, pick_types(info, meg=True, stim=True,
                                          resp=True))
        info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,
                                            resp=True))
        # measurement-dependent fields come from the pdf, hence are only
        # populated in the full info
        assert_true(info["sfreq"] is not None)
        assert_true(info["lowpass"] is not None)
        assert_true(info["highpass"] is not None)
        assert_true(info["meas_date"] is not None)

        assert_equal(info2["sfreq"], None)
        assert_equal(info2["lowpass"], None)
        assert_equal(info2["highpass"], None)
        assert_equal(info2["meas_date"], None)

        # channel layout and transforms must agree regardless of pdf
        assert_equal(info["ch_names"], info2["ch_names"])
        assert_equal(info["ch_names"], info2["ch_names"])  # duplicate kept
        for key in ["dev_ctf_t", "dev_head_t", "ctf_head_t"]:
            assert_array_equal(info[key]["trans"], info2[key]["trans"])

        assert_array_equal(np.array([ch["loc"] for ch in info["chs"]]),
                           np.array([ch["loc"] for ch in info2["chs"]]))

        # just check reading data | corner case
        with warnings.catch_warnings(record=True):  # weight tables
            raw1 = read_raw_bti(
                pdf_fname=pdf, config_fname=config, head_shape_fname=None,
                sort_by_ch_name=False, preload=True)
            # just check reading data | corner case
            raw2 = read_raw_bti(
                pdf_fname=pdf, config_fname=config, head_shape_fname=None,
                rename_channels=False, sort_by_ch_name=True, preload=True,
            )
        # reordering channels must not change the data itself
        sort_idx = [raw1.bti_ch_labels.index(ch) for ch in
                    raw2.bti_ch_labels]
        raw1._data = raw1._data[sort_idx]
        assert_array_equal(raw1._data, raw2._data)
        assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_plot_alignment_basic(tmpdir, renderer, mixed_fwd_cov_evoked):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
            'r': [-0.08061612, -0.02908875, -0.04131077]},
           {'coord_frame': 5, 'ident': 2, 'kind': 1,
            'r': [0.00146763, 0.08506715, -0.03483611]},
           {'coord_frame': 5, 'ident': 3, 'kind': 1,
            'r': [0.08436285, -0.02850276, -0.04127743]}]
    write_dig(fiducials_path, fid, 5)
    evoked = read_evokeds(evoked_fname)[0]
    info = evoked.info
    sample_src = read_source_spaces(src_fname)
    # bad-input smoke tests
    pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    pytest.raises(OSError, plot_alignment, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir, src='foo')
    pytest.raises(ValueError, plot_alignment, info, trans_fname,
                  subject='fsaverage', subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
                    brain='white')
    # mixed source space
    mixed_src = mixed_fwd_cov_evoked[0]['src']
    assert mixed_src.kind == 'mixed'
    plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
                   coord_frame='head', trans=Path(trans_fname),
                   subject='sample', mri_fiducials=fiducials_path,
                   subjects_dir=subjects_dir, src=mixed_src)
    renderer.backend._close_all()
    # no-head version
    renderer.backend._close_all()
    # trans required
    with pytest.raises(ValueError, match='transformation matrix is required'):
        plot_alignment(info, trans=None, src=src_fname)
    with pytest.raises(ValueError, match='transformation matrix is required'):
        plot_alignment(info, trans=None, mri_fiducials=True)
    with pytest.raises(ValueError, match='transformation matrix is required'):
        plot_alignment(info, trans=None, surfaces=['brain'])
    # all coord frames
    plot_alignment(info)  # works: surfaces='auto' default
    for coord_frame in ('meg', 'head', 'mri'):
        fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
                             coord_frame=coord_frame,
                             trans=Path(trans_fname), subject='sample',
                             mri_fiducials=fiducials_path,
                             subjects_dir=subjects_dir, src=src_fname)
    renderer.backend._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
                                            'EEG 002': 'seeg'})
    with catch_logging() as log:
        plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
                       trans=trans_fname, subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'], ecog=True, seeg=True,
                       verbose=True)
    log = log.getvalue()
    assert 'ecog: 1' in log
    assert 'seeg: 1' in log
    renderer.backend._close_all()

    sphere = make_sphere_model(info=info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info, trans_fname, subject='sample', eeg='projected', meg='helmet',
        bem=sphere, dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info, subject='sample', meg='helmet',
                   subjects_dir=subjects_dir, eeg='projected', bem=sphere,
                   surfaces=['head', 'brain'], src=sample_src)
    # no trans okay, no mri surfaces
    plot_alignment(info, bem=sphere, surfaces=['brain'])
    with pytest.raises(ValueError, match='A head surface is required'):
        plot_alignment(info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, eeg='projected',
                       surfaces=[])
    with pytest.raises(RuntimeError, match='No brain surface found'):
        plot_alignment(info, trans=trans_fname, subject='foo',
                       subjects_dir=subjects_dir, surfaces=['brain'])
    # the BEM surfaces must not be mutated by plotting (checked before
    # and after the call below)
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample', meg=[],
                   subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull',
                             'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample', meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'], bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info, trans_fname, subject='sample', meg=True,
                           subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'], bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert ('not find the surface for head in the provided BEM model'
                in log)
    # sphere model
    sphere = make_sphere_model('auto', 'auto', info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info, trans=Transform('head', 'mri'), eeg='projected',
        meg='helmet', bem=sphere, src=src, dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, info)  # one layer
    # if you ask for a brain surface with a 1-layer sphere model it's an
    # error
    with pytest.raises(RuntimeError, match='Sphere model does not have'):
        fig = plot_alignment(trans=trans_fname, subject='sample',
                             subjects_dir=subjects_dir,
                             surfaces=['brain'], bem=sphere)
    # but you can ask for a specific brain surface, and
    # no info is permitted
    fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
                         coord_frame='mri', subjects_dir=subjects_dir,
                         surfaces=['white'], bem=sphere, show_axes=True)
    renderer.backend._close_all()
    if renderer._get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)

    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, np.arange(6))
    # NOTE(review): this clears 'dig' on the shared ``info`` object, not
    # on ``info_cube`` — confirm that is intended.
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    info_cube['chs'][1]['coil_type'] = 9998
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    # (``fid`` here shadows the fiducials list defined above)
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    # make sure our other OPMs can be plotted, too
    # start=2: channels 0 and 1 already hold the 9999/9998 coil types
    for ii, kind in enumerate(('QUSPIN_ZFOPM_MAG', 'QUSPIN_ZFOPM_MAG2',
                               'FIELDLINE_OPM_MAG_GEN1',
                               'KERNEL_OPM_MAG_GEN1'), 2):
        info_cube['chs'][ii]['coil_type'] = getattr(
            FIFF, f'FIFFV_COIL_{kind}')
    with use_coil_def(coil_def_fname):
        with catch_logging() as log:
            plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True,
                           verbose='debug')
    log = log.getvalue()
    assert 'planar geometry' in log

    # one layer bem with skull surfaces:
    with pytest.raises(RuntimeError,
                       match='Sphere model does not.*boundary'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'],
                       bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='Invalid value for the .eeg'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='Invalid value for the .meg'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError,
                       match='Only one brain surface can be plot'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='surfaces.*must be'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, surfaces=['foo'])
    with pytest.raises(TypeError, match="must be an instance of "):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=dict(brain='super clear'))
    with pytest.raises(ValueError, match="must be between 0 and 1"):
        plot_alignment(info=info, trans=trans_fname, subject='sample',
                       subjects_dir=subjects_dir, surfaces=dict(brain=42))

    # forward-solution-based plotting
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd, surfaces='white',
                   coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd, surfaces='white',
                   coord_frame='head')
    fwd['coord_frame'] = FIFF.FIFFV_COORD_MRI
    # check required to get to MRI
    with pytest.raises(ValueError, match='transformation matrix is required'):
        plot_alignment(info, trans=None, fwd=fwd)
    # surfaces as dict
    plot_alignment(subject='sample', coord_frame='head',
                   trans=trans_fname, subjects_dir=subjects_dir,
                   surfaces={'white': 0.4, 'outer_skull': 0.6,
                             'head': None})
def compute_forward_stack(subjects_dir,
                          subject,
                          recordings_path,
                          info_from=(('data_type', 'rest'),
                                     ('run_index', 0)),
                          fwd_params=None, src_params=None,
                          hcp_path=op.curdir, n_jobs=1, verbose=None):
    """Convenience function for conducting standard MNE analyses.

    .. note::
       this function computes bem solutions, source spaces and forward
       models optimized for connectivity computation, i.e., the fsaverage
       space is morphed onto the subject's space.

    Fix: guard ``'fname' in src_params`` — ``src_params`` defaults to
    None, which previously raised ``TypeError: argument of type
    'NoneType' is not iterable`` on MNE versions whose
    ``setup_source_space`` has no ``fname`` argument.

    Parameters
    ----------
    subject : str
        The subject name.
    hcp_path : str
        The directory containing the HCP data.
    recordings_path : str
        The path where MEG data and transformations are stored.
    subjects_dir : str
        The directory containing the extracted HCP subject data.
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read. Must not be empty room as sensor positions are
        in head coordinates for 4D systems, hence not available in that
        case. Note that differences between the sensor positions across
        runs are smaller than 12 digits, hence negligible.
    fwd_params : None | dict
        The forward parameters
    src_params : None | dict
        The src params. Defaults to:

        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=2,
             surface='white', subjects_dir=subjects_dir, add_dist=True)
    n_jobs : int
        The number of jobs to use in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose)

    Returns
    -------
    out : dict
        A dictionary with the following keys:
            fwd : instance of mne.Forward
                The forward solution.
            src_subject : instance of mne.SourceSpace
                The source model on the subject's surface
            src_fsaverage : instance of mne.SourceSpace
                The source model on fsaverage's surface
            bem_sol : dict
                The BEM.
            info : instance of mne.io.meas_info.Info
                The actual measurement info used.
    """
    if isinstance(info_from, tuple):
        info_from = dict(info_from)

    head_mri_t = mne.read_trans(
        op.join(recordings_path, subject,
                '{}-head_mri-trans.fif'.format(subject)))

    src_defaults = dict(subject='fsaverage', spacing='oct6', n_jobs=n_jobs,
                        surface='white', subjects_dir=subjects_dir,
                        add_dist=True)
    if 'fname' in mne.fixes._get_args(mne.setup_source_space):
        # needed for mne-0.14 and below
        src_defaults.update(dict(fname=None))
    else:
        # remove 'fname' argument (if present) when using mne-0.15+;
        # src_params may still be None here, so guard before membership test
        if src_params is not None and 'fname' in src_params:
            del src_params['fname']
    src_params = _update_dict_defaults(src_params, src_defaults)

    add_source_space_distances = False
    if src_params['add_dist']:  # we want the distances on the morphed space
        src_params['add_dist'] = False
        add_source_space_distances = True

    # source space on fsaverage, then morphed to the subject
    src_fsaverage = mne.setup_source_space(**src_params)
    src_subject = mne.morph_source_spaces(src_fsaverage, subject,
                                          subjects_dir=subjects_dir)

    if add_source_space_distances:  # and here we compute them post hoc.
        src_subject = mne.add_source_space_distances(src_subject,
                                                     n_jobs=n_jobs)

    # single-layer (MEG) BEM
    bems = mne.make_bem_model(subject, conductivity=(0.3,),
                              subjects_dir=subjects_dir,
                              ico=None)  # ico = None for morphed SP.
    bem_sol = mne.make_bem_solution(bems)
    bem_sol['surfs'][0]['coord_frame'] = 5

    info = read_info(subject=subject, hcp_path=hcp_path, **info_from)
    picks = _pick_data_channels(info, with_ref_meg=False)
    info = pick_info(info, picks)

    # here we assume that as a result of our MNE-HCP processing
    # all other transforms in info are identity
    for trans in ['dev_head_t', 'ctf_head_t']:
        #  'dev_ctf_t' is not identity
        assert np.sum(info[trans]['trans'] - np.eye(4)) == 0

    fwd = mne.make_forward_solution(
        info, trans=head_mri_t, bem=bem_sol, src=src_subject,
        n_jobs=n_jobs)

    return dict(fwd=fwd, src_subject=src_subject,
                src_fsaverage=src_fsaverage,
                bem_sol=bem_sol, info=info)
def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                  res=64, axes=None, names=None, show_names=False, mask=None,
                  mask_params=None, outlines='head', contours=6,
                  image_interp='bilinear', show=True, head_pos=None,
                  onselect=None, extrapolate='box', border=0):
    """Plot a topographic map of ``data`` as an interpolated image.

    ``pos`` is either an (n_channels, 2) array of sensor positions or an
    Info object from which positions are inferred. Returns the image,
    the contour set (or None), the interpolator, and the clip patch.
    """
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = {channel_type(pos, idx)
                   for idx, _ in enumerate(pos["chs"])}
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.io.pick.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. " +
                             info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from mne.channels.layout import (_merge_grad_data, find_layout,
                                             _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 "
                 "columns. Are you trying to specify XYZ coordinates? "
                 "Perhaps the mne.channels.create_eeg_layout function is "
                 "useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data "
                         "of length %s, pos of length %s"
                         % (len(data), len(pos)))

    # all-nonnegative data gets a one-sided colormap
    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)
    assert isinstance(outlines, dict)

    ax = axes if axes else plt.gca()
    _prepare_topomap(pos, ax)

    _use_default_outlines = any(k.startswith('head') for k in outlines)

    if _use_default_outlines:
        # prepare masking
        _autoshrink(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # find mask limits
    xlim = np.inf, -np.inf,
    ylim = np.inf, -np.inf,
    mask_ = np.c_[outlines['mask_pos']]
    xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                  np.max(np.r_[xlim[1], mask_[:, 0]]))
    ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                  np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate the data, we multiply clip radius by 1.06 so that pixelated
    # edges of the interpolated image would appear under the mask
    head_radius = (None if extrapolate == 'local' else
                   outlines['clip_radius'][0] * 1.06)
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    interp = _GridData(pos, extrapolate, head_radius,
                       border).set_values(data)
    Zi = interp.set_locations(Xi, Yi)()

    # plot outline
    patch_ = None
    if 'patch' in outlines:
        patch_ = outlines['patch']
        patch_ = patch_() if callable(patch_) else patch_
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)
    if _use_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)

    # plot interpolated map
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    linewidth = mask_params['markeredgewidth']
    no_contours = False
    if isinstance(contours, (np.ndarray, list)):
        pass  # contours precomputed
    elif contours == 0:
        contours, no_contours = 1, True
    if (Zi == Zi[0, 0]).all():
        cont = None  # can't make contours for constant-valued functions
    else:
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore')
            cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                              linewidths=linewidth / 2.)

    if no_contours and cont is not None:
        for col in cont.collections:
            col.set_visible(False)

    # clip image and contours to the head/patch outline
    if patch_ is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    pos_x, pos_y = pos.T
    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        # highlight masked sensors, draw the rest normally
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be "
                             "provided (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            # show_names may be a callable that transforms each name
            _show_names = show_names
        show_idx = (np.arange(len(names)) if mask is None
                    else np.where(mask)[0])
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)

    plt_show(show)
    return im, cont, interp, patch_
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                 res=64, axes=None, names=None, show_names=False, mask=None,
                 mask_params=None, outlines='head', image_mask=None,
                 contours=6, image_interp='bilinear', show=True,
                 head_pos=None, onselect=None, axis=None):
    ''' see the docstring for mne.viz.plot_topomap,
    which i've simply modified to return more objects

    Unlike the upstream function, this returns ``(ax, im, cont, pos_x,
    pos_y)`` so callers can post-process the axes and sensor positions.
    '''
    from matplotlib.widgets import RectangleSelector
    from mne.io.pick import (channel_type, pick_info, _pick_data_channels)
    from mne.utils import warn
    from mne.viz.utils import (_setup_vmin_vmax, plt_show)
    from mne.defaults import _handle_default
    from mne.channels.layout import _find_topomap_coords
    from mne.io.meas_info import Info
    from mne.viz.topomap import _check_outlines, _prepare_topomap, \
        _griddata, _make_image_mask, _plot_sensors, \
        _draw_outlines

    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = set(channel_type(pos, idx)
                      for idx, _ in enumerate(pos["chs"]))
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.channels.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. " +
                             info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs: merge each gradiometer pair into one value
            from ..channels.layout import (_merge_grad_data, find_layout,
                                           _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data),
                                                          len(pos)))

    # all-nonnegative data gets a sequential colormap by default
    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)

    if axis is not None:  # deprecated alias kept for backward compatibility
        axes = axis
        warn('axis parameter is deprecated and will be removed in 0.13. '
             'Use axes instead.', DeprecationWarning)

    ax = axes if axes else plt.gca()
    pos_x, pos_y = _prepare_topomap(pos, ax)
    if outlines is None:
        xmin, xmax = pos_x.min(), pos_x.max()
        ymin, ymax = pos_y.min(), pos_y.max()
    else:
        # extent covers both sensor positions and the outline mask
        xlim = np.inf, -np.inf,
        ylim = np.inf, -np.inf,
        mask_ = np.c_[outlines['mask_pos']]
        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                      np.max(np.r_[xlim[1], mask_[:, 0]]))
        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                      np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate data onto a regular res x res grid
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)

    # NOTE(review): if outlines is neither None nor a dict,
    # _is_default_outlines stays unbound and the use below would raise
    # NameError -- presumably _check_outlines guarantees dict-or-None; verify.
    if outlines is None:
        _is_default_outlines = False
    elif isinstance(outlines, dict):
        _is_default_outlines = any(k.startswith('head') for k in outlines)

    if _is_default_outlines and image_mask is None:
        # prepare masking
        image_mask, pos = _make_image_mask(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # plot outline
    linewidth = mask_params['markeredgewidth']
    patch = None
    if 'patch' in outlines:
        patch = outlines['patch']
        patch_ = patch() if callable(patch) else patch
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)

    # plot map and countour
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)
    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    no_contours = False
    if contours in (False, None):
        contours, no_contours = 1, True
    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                      linewidths=linewidth)
    if no_contours is True:
        for col in cont.collections:
            col.set_visible(False)

    if _is_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)
    if _is_default_outlines or patch is not None:
        # clip the image (and contours) to the head/custom patch
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    # sensor markers: masked channels get mask_params styling
    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            # identity: show the channel name unchanged
            def _show_names(x):
                return x
        else:
            # user-supplied callable transforms each name before display
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    plt.subplots_adjust(top=.95)

    if onselect is not None:
        # attach the selector to the axes so it is not garbage-collected
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return ax, im, cont, pos_x, pos_y
def test_plot_alignment(tmpdir, renderer):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
            'r': [-0.08061612, -0.02908875, -0.04131077]},
           {'coord_frame': 5, 'ident': 2, 'kind': 1,
            'r': [0.00146763, 0.08506715, -0.03483611]},
           {'coord_frame': 5, 'ident': 3, 'kind': 1,
            'r': [0.08436285, -0.02850276, -0.04127743]}]
    write_dig(fiducials_path, fid, 5)
    renderer._close_all()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
                       preload=False).info
    # one info per MEG vendor/system to exercise all sensor layouts
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        fig = plot_alignment(info, trans_fname, subject='sample',
                             subjects_dir=subjects_dir, meg=meg)
        rend = renderer._Renderer(fig=fig)
        rend.close()
    # KIT ref sensor coil def is defined
    renderer._close_all()
    info = infos['Neuromag']
    # invalid argument types/values must raise
    pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    pytest.raises(OSError, plot_alignment, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir, src='foo')
    pytest.raises(ValueError, plot_alignment, info, trans_fname,
                  subject='fsaverage', subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
                    brain='white')
    renderer._close_all()
    # no-head version
    renderer._close_all()
    # all coord frames
    pytest.raises(ValueError, plot_alignment, info)
    plot_alignment(info, surfaces=[])
    for coord_frame in ('meg', 'head', 'mri'):
        fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
                             coord_frame=coord_frame, trans=trans_fname,
                             subject='sample', mri_fiducials=fiducials_path,
                             subjects_dir=subjects_dir, src=src_fname)
    renderer._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
                                            'EEG 002': 'seeg'})
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
                       trans=trans_fname, subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'], ecog=True, seeg=True)
    renderer._close_all()

    sphere = make_sphere_model(info=evoked.info, r0='auto',
                               head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(info, subject='sample', eeg='projected', meg='helmet',
                   bem=sphere, dig=True,
                   surfaces=['brain', 'inner_skull', 'outer_skull',
                             'outer_skin'])
    plot_alignment(info, trans_fname, subject='sample', meg='helmet',
                   subjects_dir=subjects_dir, eeg='projected', bem=sphere,
                   surfaces=['head', 'brain'], src=sample_src)
    # plotting must not mutate the BEM's coordinate frames
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample', meg=[],
                   subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull',
                             'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample',
                   meg=True, subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'], bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info, trans_fname, subject='sample',
                           meg=True, subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'], bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert 'not find the surface for head in the provided BEM model' in \
            log
    # sphere model
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(info, eeg='projected', meg='helmet', bem=sphere, src=src,
                   dig=True, surfaces=['brain', 'inner_skull', 'outer_skull',
                                       'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    # no info is permitted
    fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
                         coord_frame='mri', subjects_dir=subjects_dir,
                         surfaces=['brain'], bem=sphere, show_axes=True)
    renderer._close_all()
    if renderer.get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)

    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, [0])
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    with use_coil_def(coil_def_fname):
        plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)

    # one layer bem with skull surfaces:
    with pytest.raises(ValueError, match='sphere conductor model must have'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='eeg must only contain'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='meg must only contain'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError, match='Only one brain surface can be plot'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='all entries in surfaces must be'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['foo'])
    # forward-solution sensor display, free and fixed orientations
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd,
                   surfaces='white', coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd,
                   surfaces='white', coord_frame='head')

    renderer._close_all()
def test_plot_topomap():
    """Test topomap plotting."""
    # evoked
    res = 8
    # minimal kwargs to keep plotting fast in tests
    fast_test = dict(res=res, contours=0, sensors=False, time_unit='s')
    fast_test_noscale = dict(res=res, contours=0, sensors=False)
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))

    # Test animation
    _, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
                                     butterfly=False, time_unit='s')
    anim._func(1)  # _animate has to be tested separately on 'Agg' backend.
    plt.close('all')

    # bad-input handling on a tiny two-channel EEG copy
    ev_bad = evoked.copy().pick_types(meg=False, eeg=True)
    ev_bad.pick_channels(ev_bad.ch_names[:2])
    plt_topomap = partial(ev_bad.plot_topomap, **fast_test)
    plt_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, plots EEG
    pytest.raises(ValueError, plt_topomap, ch_type='mag')
    pytest.raises(TypeError, plt_topomap, head_pos='foo')
    pytest.raises(KeyError, plt_topomap, head_pos=dict(foo='bar'))
    pytest.raises(ValueError, plt_topomap, head_pos=dict(center=0))
    pytest.raises(ValueError, plt_topomap, times=[-100])  # bad time
    pytest.raises(ValueError, plt_topomap, times=[[0]])  # bad time

    evoked.plot_topomap([0.1], ch_type='eeg', scalings=1, res=res,
                        contours=[-100, 0, 100], time_unit='ms')

    # extrapolation to the edges of the convex hull or the head circle
    evoked.plot_topomap([0.1], ch_type='eeg', scalings=1, res=res,
                        contours=[-100, 0, 100], time_unit='ms',
                        extrapolate='local')
    evoked.plot_topomap([0.1], ch_type='eeg', scalings=1, res=res,
                        contours=[-100, 0, 100], time_unit='ms',
                        extrapolate='head')
    evoked.plot_topomap([0.1], ch_type='eeg', scalings=1, res=res,
                        contours=[-100, 0, 100], time_unit='ms',
                        extrapolate='head', outlines='skirt')

    # extrapolation options when < 4 channels:
    temp_data = np.random.random(3)
    picks = channel_indices_by_type(evoked.info)['mag'][:3]
    info_sel = pick_info(evoked.info, picks)
    plot_topomap(temp_data, info_sel, extrapolate='local', res=res)
    plot_topomap(temp_data, info_sel, extrapolate='head', res=res)

    plt_topomap = partial(evoked.plot_topomap, **fast_test)
    plt_topomap(0.1, layout=layout, scalings=dict(mag=0.1))
    plt.close('all')
    axes = [plt.subplot(221), plt.subplot(222)]
    plt_topomap(axes=axes, colorbar=False)
    plt.close('all')
    plt_topomap(times=[-0.1, 0.2])
    plt.close('all')
    evoked_grad = evoked.copy().crop(0, 0).pick_types(meg='grad')
    mask = np.zeros((204, 1), bool)
    mask[[0, 3, 5, 6]] = True
    names = []

    # record every name passed in while stripping the 'MEG ' prefix
    def proc_names(x):
        names.append(x)
        return x[4:]

    evoked_grad.plot_topomap(ch_type='grad', times=[0], mask=mask,
                             show_names=proc_names, **fast_test)
    assert_equal(sorted(names),
                 ['MEG 011x', 'MEG 012x', 'MEG 013x', 'MEG 014x'])
    mask = np.zeros_like(evoked.data, dtype=bool)
    mask[[1, 5], :] = True
    plt_topomap(ch_type='mag', outlines=None)
    times = [0.1]
    plt_topomap(times, ch_type='grad', mask=mask)
    plt_topomap(times, ch_type='planar1')
    plt_topomap(times, ch_type='planar2')
    plt_topomap(times, ch_type='grad', mask=mask, show_names=True,
                mask_params={'marker': 'x'})
    plt.close('all')
    pytest.raises(ValueError, plt_topomap, times, ch_type='eeg',
                  average=-1e3)
    pytest.raises(ValueError, plt_topomap, times, ch_type='eeg',
                  average='x')

    # show_names callable should be applied to the displayed labels
    p = plt_topomap(times, ch_type='grad', image_interp='bilinear',
                    show_names=lambda x: x.replace('MEG', ''))
    subplot = [x for x in p.get_children() if 'Subplot' in str(type(x))]
    assert len(subplot) >= 1, [type(x) for x in p.get_children()]
    subplot = subplot[0]
    assert (all('MEG' not in x.get_text()
                for x in subplot.get_children()
                if isinstance(x, matplotlib.text.Text)))

    # Plot array
    for ch_type in ('mag', 'grad'):
        evoked_ = evoked.copy().pick_types(eeg=False, meg=ch_type)
        plot_topomap(evoked_.data[:, 0], evoked_.info, **fast_test_noscale)
    # fail with multiple channel types
    pytest.raises(ValueError, plot_topomap, evoked.data[0, :], evoked.info)

    # Test title
    def get_texts(p):
        return [x.get_text() for x in p.get_children()
                if isinstance(x, matplotlib.text.Text)]

    p = plt_topomap(times, ch_type='eeg', average=0.01)
    assert_equal(len(get_texts(p)), 0)
    p = plt_topomap(times, ch_type='eeg', title='Custom')
    texts = get_texts(p)
    assert_equal(len(texts), 1)
    assert_equal(texts[0], 'Custom')
    plt.close('all')

    # delaunay triangulation warning
    plt_topomap(times, ch_type='mag', layout=None)
    # projs have already been applied
    pytest.raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
                  proj='interactive', time_unit='s')
    # change to no-proj mode
    evoked = read_evokeds(evoked_fname, 'Left Auditory', baseline=(None, 0),
                          proj=False)
    fig1 = evoked.plot_topomap('interactive', 'mag', proj='interactive',
                               **fast_test)
    _fake_click(fig1, fig1.axes[1], (0.5, 0.5))  # click slider
    data_max = np.max(fig1.axes[0].images[0]._A)
    fig2 = plt.gcf()
    _fake_click(fig2, fig2.axes[0], (0.075, 0.775))  # toggle projector
    # make sure projector gets toggled
    assert (np.max(fig1.axes[0].images[0]._A) != data_max)

    pytest.raises(RuntimeError, plot_evoked_topomap, evoked,
                  np.repeat(.1, 50), time_unit='s')
    pytest.raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6],
                  time_unit='s')

    for ch in evoked.info['chs']:
        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
            ch['loc'].fill(0)

    # Remove extra digitization point, so EEG digitization points
    # correspond with the EEG electrodes
    del evoked.info['dig'][85]

    pos = make_eeg_layout(evoked.info).pos[:, :2]
    pos, outlines = _check_outlines(pos, 'head')
    assert ('head' in outlines.keys())
    assert ('nose' in outlines.keys())
    assert ('ear_left' in outlines.keys())
    assert ('ear_right' in outlines.keys())
    assert ('autoshrink' in outlines.keys())
    assert (outlines['autoshrink'])
    assert ('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.5)

    pos, outlines = _check_outlines(pos, 'skirt')
    assert ('head' in outlines.keys())
    assert ('nose' in outlines.keys())
    assert ('ear_left' in outlines.keys())
    assert ('ear_right' in outlines.keys())
    assert ('autoshrink' in outlines.keys())
    assert (not outlines['autoshrink'])
    assert ('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.625)

    pos, outlines = _check_outlines(pos, 'skirt',
                                    head_pos={'scale': [1.2, 1.2]})
    assert_array_equal(outlines['clip_radius'], 0.75)

    # Plot skirt
    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt', **fast_test)

    # Pass custom outlines without patch
    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines, **fast_test)
    plt.close('all')

    # Test interactive cmap
    fig = plot_evoked_topomap(evoked, times=[0., 0.1], ch_type='eeg',
                              cmap=('Reds', True), title='title',
                              **fast_test)
    fig.canvas.key_press_event('up')
    fig.canvas.key_press_event(' ')
    fig.canvas.key_press_event('down')
    cbar = fig.get_axes()[0].CB  # Fake dragging with mouse.
    ax = cbar.cbar.ax
    _fake_click(fig, ax, (0.1, 0.1))
    _fake_click(fig, ax, (0.1, 0.2), kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')
    _fake_click(fig, ax, (0.1, 0.1), button=3)
    _fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')
    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
    plt.close('all')

    # Pass custom outlines with patch callable
    def patch():
        return Circle((0.5, 0.4687), radius=.46,
                      clip_on=True, transform=plt.gca().transAxes)
    outlines['patch'] = patch
    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines,
                        **fast_test)

    # Remove digitization points. Now topomap should fail
    evoked.info['dig'] = None
    pytest.raises(RuntimeError, plot_evoked_topomap, evoked,
                  times, ch_type='eeg', time_unit='s')
    plt.close('all')

    # Error for missing names
    n_channels = len(pos)
    data = np.ones(n_channels)
    pytest.raises(ValueError, plot_topomap, data, pos, show_names=True)

    # Test error messages for invalid pos parameter
    pos_1d = np.zeros(n_channels)
    pos_3d = np.zeros((n_channels, 2, 2))
    pytest.raises(ValueError, plot_topomap, data, pos_1d)
    pytest.raises(ValueError, plot_topomap, data, pos_3d)
    pytest.raises(ValueError, plot_topomap, data, pos[:3, :])
    pos_x = pos[:, :1]
    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
    pytest.raises(ValueError, plot_topomap, data, pos_x)
    pytest.raises(ValueError, plot_topomap, data, pos_xyz)

    # An #channels x 4 matrix should work though. In this case (x, y, width,
    # height) is assumed.
    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
    plot_topomap(data, pos_xywh)
    plt.close('all')

    # Test peak finder
    axes = [plt.subplot(131), plt.subplot(132)]
    evoked.plot_topomap(times='peaks', axes=axes, **fast_test)
    plt.close('all')
    evoked.data = np.zeros(evoked.data.shape)
    evoked.data[50][1] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
    evoked.data[80][100] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
    evoked.data[2][95] = 2
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])

    # Test excluding bads channels
    evoked_grad.info['bads'] += [evoked_grad.info['ch_names'][0]]
    orig_bads = evoked_grad.info['bads']
    evoked_grad.plot_topomap(ch_type='grad', times=[0], time_unit='ms')
    # plotting must not modify the bads list in place
    assert_array_equal(evoked_grad.info['bads'], orig_bads)
    plt.close('all')
def make_mne_forward(anatomy_path,
                     subject,
                     recordings_path,
                     info_from=(('data_type', 'rest'), ('run_index', 0)),
                     fwd_params=None, src_params=None,
                     hcp_path=op.curdir, n_jobs=1):
    """Convenience script for conducting standard MNE analyses.

    Parameters
    ----------
    subject : str
        The subject name.
    hcp_path : str
        The directory containing the HCP data.
    recordings_path : str
        The path where MEG data and transformations are stored.
    anatomy_path : str
        The directory containing the extracted HCP subject data.
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read.
        Must not be empty room as sensor positions are in head
        coordinates for 4D systems, hence not available in that case.
        Note that differences between the sensor positions across runs
        are smaller than 12 digits, hence negligible.
    fwd_params : None | dict
        The forward parameters
    src_params : None | dict
        The src params. Defaults to:

        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=2,
             surface='white', subjects_dir=anatomy_path, add_dist=True)
    hcp_path : str
        The prefix of the path of the HCP data.
    n_jobs : int
        The number of jobs to use in parallel.

    Returns
    -------
    dict
        Keys: 'fwd', 'src_subject', 'src_fsaverage', 'bem_sol', 'info'.
    """
    if isinstance(info_from, tuple):
        info_from = dict(info_from)

    head_mri_t = mne.read_trans(
        op.join(recordings_path, subject, '{}-head_mri-trans.fif'.format(
            subject)))

    src_params = _update_dict_defaults(
        src_params,
        dict(subject='fsaverage', fname=None,
             spacing='oct6', n_jobs=n_jobs, surface='white',
             subjects_dir=anatomy_path, add_dist=True))

    add_source_space_distances = False
    if src_params['add_dist']:  # we want the distances on the morphed space
        src_params['add_dist'] = False
        add_source_space_distances = True

    # set up fsaverage source space, then morph it onto this subject
    src_fsaverage = mne.setup_source_space(**src_params)
    src_subject = mne.morph_source_spaces(src_fsaverage, subject,
                                          subjects_dir=anatomy_path)

    if add_source_space_distances:  # and here we compute them post hoc.
        src_subject = mne.add_source_space_distances(
            src_subject, n_jobs=n_jobs)

    # single-layer (MEG-only) BEM with brain conductivity 0.3 S/m
    bems = mne.make_bem_model(subject, conductivity=(0.3,),
                              subjects_dir=anatomy_path,
                              ico=None)  # ico = None for morphed SP.
    bem_sol = mne.make_bem_solution(bems)

    info = read_info_hcp(subject=subject, hcp_path=hcp_path, **info_from)
    picks = _pick_data_channels(info, with_ref_meg=False)
    info = pick_info(info, picks)

    # here we assume that as a result of our MNE-HCP processing
    # all other transforms in info are identity
    for trans in ['dev_head_t', 'ctf_head_t']:
        #  'dev_ctf_t' is not identity
        assert np.sum(info[trans]['trans'] - np.eye(4)) == 0

    fwd = mne.make_forward_solution(
        info, trans=head_mri_t, bem=bem_sol, src=src_subject,
        n_jobs=n_jobs)

    return dict(fwd=fwd, src_subject=src_subject,
                src_fsaverage=src_fsaverage,
                bem_sol=bem_sol, info=info)
def find_bad_channels_in_epochs(epochs, picks=None, method='faster',
                                method_params=None, return_by_metric=False):
    """Implements the fourth step of the FASTER algorithm.

    This function attempts to automatically mark bad channels in each epochs
    by performing outlier detection.

    Parameters
    ----------
    epochs : Instance of Epochs
        The epochs to analyze.
    picks : list of int | None
        Channels to operate on. Defaults to EEG channels.
    method : {'faster'}
        The detection algorithm.
    method_params : dict | None
        The method parameters in a dict.

        If ``method`` equals 'faster', and ``method_params`` is None,
        defaults to the following parameters. Partial updates are supported.

        use_metrics : list of str
            List of metrics to use. Can be any combination of:
            'amplitude', 'variance', 'deviation', 'median_gradient'
            Defaults to all of them.
        thresh : float
            The threshold value, in standard deviations, to apply. A channel
            crossing this threshold value is marked as bad. Defaults to 3.
        max_iter : int
            The maximum number of iterations performed during outlier
            detection (defaults to 1, as in the original FASTER paper).
    return_by_metric : bool
        Whether to return the bad channels as a flat list (False, default)
        or as a dictionary with the names of the used metrics as keys and
        the bad channels found by this metric as values. Is ignored if not
        supported by method.

    Returns
    -------
    bads : list of lists of int
        For each epoch, the indices of the bad channels.

    Raises
    ------
    NotImplementedError
        If ``method`` is anything other than 'faster'.
    """
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True, exclude=[])

    _method_params = _handle_default('bads' + '_' + method, method_params)
    if method == 'faster':
        bads = _find_bad_channels_in_epochs(epochs, picks, **_method_params)
    else:
        raise NotImplementedError(
            'Come back later, for now there is only "FASTER"')

    info = pick_info(epochs.info, picks, copy=True)
    if return_by_metric:
        bads = dict((m, _bad_mask_to_names(info, v)) for m, v in bads.items())
    else:
        # Combine the per-metric boolean masks into one "bad anywhere" mask.
        # BUGFIX: materialize the values first -- on Python 3,
        # ``dict.values()`` is a view object, which ``np.sum`` wraps as a
        # 0-d object array instead of stacking the masks along axis 0.
        bads = np.sum(list(bads.values()), axis=0).astype(bool)
        bads = _bad_mask_to_names(info, bads)

    return bads
def test_plot_alignment(tmpdir):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
            'r': [-0.08061612, -0.02908875, -0.04131077]},
           {'coord_frame': 5, 'ident': 2, 'kind': 1,
            'r': [0.00146763, 0.08506715, -0.03483611]},
           {'coord_frame': 5, 'ident': 3, 'kind': 1,
            'r': [0.08436285, -0.02850276, -0.04127743]}]
    write_dig(fiducials_path, fid, 5)

    mlab = _import_mlab()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
                       preload=False).info
    # one info per MEG vendor/system to exercise all sensor layouts
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        plot_alignment(info, trans_fname, subject='sample',
                       subjects_dir=subjects_dir, meg=meg)
        mlab.close(all=True)
    # KIT ref sensor coil def is defined
    mlab.close(all=True)
    info = infos['Neuromag']
    # invalid argument types/values must raise
    pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    pytest.raises(TypeError, plot_alignment, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir, src='foo')
    pytest.raises(ValueError, plot_alignment, info, trans_fname,
                  subject='fsaverage', subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
                    brain='white')
    mlab.close(all=True)
    # no-head version
    mlab.close(all=True)
    # all coord frames
    pytest.raises(ValueError, plot_alignment, info)
    plot_alignment(info, surfaces=[])
    for coord_frame in ('meg', 'head', 'mri'):
        plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
                       coord_frame=coord_frame, trans=trans_fname,
                       subject='sample', mri_fiducials=fiducials_path,
                       subjects_dir=subjects_dir, src=sample_src)
        mlab.close(all=True)
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
                                            'EEG 002': 'seeg'})
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
                       trans=trans_fname, subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'], ecog=True, seeg=True)
    mlab.close(all=True)

    sphere = make_sphere_model(info=evoked.info, r0='auto',
                               head_radius='auto')
    bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
                                        'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
                                          'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(info, subject='sample', eeg='projected', meg='helmet',
                   bem=sphere, dig=True,
                   surfaces=['brain', 'inner_skull', 'outer_skull',
                             'outer_skin'])
    plot_alignment(info, trans_fname, subject='sample', meg='helmet',
                   subjects_dir=subjects_dir, eeg='projected', bem=sphere,
                   surfaces=['head', 'brain'], src=sample_src)
    # plotting must not mutate the BEM's coordinate frames
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample', meg=[],
                   subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull',
                             'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample',
                   meg=True, subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'], bem=bem_surfs)
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(info, eeg='projected', meg='helmet', bem=sphere, src=src,
                   dig=True, surfaces=['brain', 'inner_skull', 'outer_skull',
                                       'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    plot_alignment(info, trans_fname, subject='sample', meg=False,
                   coord_frame='mri', subjects_dir=subjects_dir,
                   surfaces=['brain'], bem=sphere, show_axes=True)

    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, [0])
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    with use_coil_def(coil_def_fname):
        plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)

    # one layer bem with skull surfaces:
    pytest.raises(ValueError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
    # wrong eeg value:
    pytest.raises(ValueError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir, eeg='foo')
    # wrong meg value:
    pytest.raises(ValueError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir, meg='bar')
    # multiple brain surfaces:
    pytest.raises(ValueError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  surfaces=['white', 'pial'])
    pytest.raises(TypeError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  surfaces=[1])
    pytest.raises(ValueError, plot_alignment, info=info, trans=trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  surfaces=['foo'])
    mlab.close(all=True)