def test_eximia_nxe():
    """Test reading Eximia NXE files"""
    fname = op.join(data_path(), 'eximia', 'test_eximia.nxe')
    raw = read_raw_eximia(fname, preload=True)
    assert_true('RawEximia' in repr(raw))
    _test_raw_reader(read_raw_eximia, fname=fname)
    # Compare against reference data exported to a MATLAB file.
    fname_mat = op.join(data_path(), 'eximia', 'test_eximia.mat')
    mat = sio.loadmat(fname_mat)
    ref_data = mat['data']
    ref_header = mat['header']
    assert_equal(raw._data.shape, ref_data.shape)
    assert_equal(ref_header['Fs'][0, 0][0, 0], raw.info['sfreq'])
    # Channel names: translate Eximia naming to the MNE naming scheme.
    ref_names = [entry[0][0] for entry in ref_header['label'][0, 0]]
    ref_names = [name.replace('GATE', 'GateIn').replace('TRIG', 'Trig')
                 for name in ref_names]
    assert_equal(raw.ch_names, ref_names)
    # Channel types: 'unknown' and 'trigger' both map to 'stim'.
    ref_types = [entry[0][0] for entry in ref_header['chantype'][0, 0]]
    ref_types = [kind.replace('unknown', 'stim').replace('trigger', 'stim')
                 for kind in ref_types]
    kind_map = {2: 'eeg', 3: 'stim', 202: 'eog'}
    got_types = [kind_map[ch['kind']] for ch in raw.info['chs']]
    assert_equal(got_types, ref_types)
    assert_array_equal(ref_data, raw._data)
def test_io_egi_pns_mff_bug():
    """Test importing EGI MFF with PNS data (BUG)."""
    egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi_pns_bug.mff')
    # Reading must warn about (and work around) the PSG sample bug.
    with pytest.warns(RuntimeWarning, match='EGI PSG sample bug'):
        raw = read_raw_egi(egi_fname_mff, include=None, preload=True,
                           verbose='warning')
    egi_fname_mat = op.join(data_path(), 'EGI', 'test_egi_pns.mat')
    mat = sio.loadmat(egi_fname_mat)
    pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True)
    pns_names = ['Resp. Temperature'[:15],
                 'Resp. Pressure',
                 'ECG',
                 'Body Position',
                 'Resp. Effort Chest'[:15],
                 'Resp. Effort Abdomen'[:15],
                 'EMG-Leg']
    mat_names = ['Resp_Temperature'[:15],
                 'Resp_Pressure',
                 'ECG',
                 'Body_Position',
                 'Resp_Effort_Chest'[:15],
                 'Resp_Effort_Abdomen'[:15],
                 'EMGLeg']
    # Each PNS channel must match the calibrated MATLAB reference.
    for name, idx, mat_name in zip(pns_names, pns_chans, mat_names):
        print('Testing {}'.format(name))
        key = next(k for k in mat.keys() if mat_name in k)
        cal = raw.info['chs'][idx]['cal']
        expected = mat[key] * cal
        expected[:, -1] = 0  # The MFF has one less sample, the last one
        assert_array_equal(expected, raw[idx][0])
def test_maxwell_filter_additional(): """Test processing of Maxwell filtered data""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = 'test_move_anon' raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif') with warnings.catch_warnings(record=True): # maxshield raw = Raw(raw_fname, preload=False, proj=False, allow_maxshield=True).crop(0., 1., False) raw_sss = maxwell.maxwell_filter(raw) # Test io on processed data tempdir = _TempDir() test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = Raw(test_outname, preload=True, proj=False, allow_maxshield=True) # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded._data[:, :], raw_sss._data[:, :], rtol=1e-6, atol=1e-20)
def test_io_egi_mff():
    """Test importing EGI MFF simple binary files."""
    egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi.mff')
    raw = read_raw_egi(egi_fname_mff, include=None)
    assert ('RawMff' in repr(raw))
    include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7']
    raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname_mff,
                           include=include, channel_naming='EEG %03d')

    # 129 EEG channels should be present, both by name and by type.
    assert_equal('eeg' in raw, True)
    n_eeg_named = len([name for name in raw.ch_names if 'EEG' in name])
    assert_equal(n_eeg_named, 129)
    assert_equal(len(pick_types(raw.info, eeg=True)), 129)

    # The synthesized trigger channel must carry the expected events.
    assert_equal('STI 014' in raw.ch_names, True)
    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 8)
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert (np.unique(events[:, 0])[0] != 0)
    assert (np.unique(events[:, 2])[0] != 0)

    # Unknown include/exclude channel names are rejected.
    pytest.raises(ValueError, read_raw_egi, egi_fname_mff,
                  include=['Foo'], preload=False)
    pytest.raises(ValueError, read_raw_egi, egi_fname_mff,
                  exclude=['Bar'], preload=False)

    # Event IDs are assigned in order of the include list, starting at 1.
    for event_number, trigger in enumerate(include, 1):
        assert (trigger in raw.event_id)
        assert (raw.event_id[trigger] == event_number)
def test_find_ch_connectivity():
    """Test computing the connectivity matrix."""
    data_path = testing.data_path()
    raw = read_raw_fif(raw_fname, preload=True)
    # Expected non-zero entries / channel counts per channel type.
    sizes = {'mag': 828, 'grad': 1700, 'eeg': 386}
    nchans = {'mag': 102, 'grad': 204, 'eeg': 60}
    for ch_type in ['mag', 'grad', 'eeg']:
        conn, ch_names = find_ch_connectivity(raw.info, ch_type)
        # Silly test for checking the number of neighbors.
        assert_equal(conn.getnnz(), sizes[ch_type])
        assert_equal(len(ch_names), nchans[ch_type])
    # ch_type=None raises while multiple channel types are present
    # (it works below once the data is reduced to a single type).
    pytest.raises(ValueError, find_ch_connectivity, raw.info, None)
    # Test computing the conn matrix with gradiometers.
    conn, ch_names = _compute_ch_connectivity(raw.info, 'grad')
    assert_equal(conn.getnnz(), 2680)

    # Test ch_type=None.
    raw.pick_types(meg='mag')
    find_ch_connectivity(raw.info, None)

    # BTi system: template-based neighbors
    bti_fname = op.join(data_path, 'BTi', 'erm_HFH', 'c,rfDC')
    bti_config_name = op.join(data_path, 'BTi', 'erm_HFH', 'config')
    raw = read_raw_bti(bti_fname, bti_config_name, None)
    _, ch_names = find_ch_connectivity(raw.info, 'mag')
    assert 'A1' in ch_names

    # CTF system
    ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
    raw = read_raw_ctf(ctf_fname)
    _, ch_names = find_ch_connectivity(raw.info, 'mag')
    assert 'MLC11' in ch_names

    # Unsupported channel type raises
    pytest.raises(ValueError, find_ch_connectivity, raw.info, 'eog')
def test_events_long():
    """Test events."""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    raw = read_raw_fif(raw_fname, preload=True)
    raw_tmin, raw_tmax = 0, 90
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, vis_l=3)

    # select gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=True, exclude=raw.info['bads'])

    # load data with usual Epochs for later verification
    # (tile the short recording 6x so it spans ~2 minutes)
    raw = concatenate_raws([raw, raw.copy(), raw.copy(), raw.copy(),
                            raw.copy(), raw.copy()])
    assert 110 < raw.times[-1] < 130
    raw_cropped = raw.copy().crop(raw_tmin, raw_tmax)
    events_offline = find_events(raw_cropped)
    epochs_offline = Epochs(raw_cropped, events_offline, event_id=event_id,
                            tmin=tmin, tmax=tmax, picks=picks, decim=1,
                            reject=dict(grad=4000e-13, eog=150e-6),
                            baseline=None)
    epochs_offline.drop_bad()

    # create the mock-client object and stream the same data through it
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
                         decim=1, reject=dict(grad=4000e-13, eog=150e-6),
                         baseline=None, isi_max=1.)
    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=raw_tmin, tmax=raw_tmax,
                        buffer_size=1000)

    # Events seen online must match the offline events (sample numbers
    # shifted by first_samp of the cropped raw).
    expected_events = epochs_offline.events.copy()
    expected_events[:, 0] = expected_events[:, 0] - raw_cropped.first_samp
    assert np.all(expected_events[:, 0] <=
                  (raw_tmax - tmax) * raw.info['sfreq'])
    assert_array_equal(rt_epochs.events, expected_events)
    assert len(rt_epochs) == len(epochs_offline)

    data_picks = pick_types(epochs_offline.info, meg='grad', eeg=False,
                            eog=True, stim=False,
                            exclude=raw.info['bads'])

    # Accumulate online epochs/labels and compare with the offline ones
    for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
        if ev_num == 0:
            X_rt = ev.data[None, data_picks, :]
            y_rt = int(ev.comment)  # comment attribute contains the event_id
        else:
            X_rt = np.concatenate((X_rt, ev.data[None, data_picks, :]),
                                  axis=0)
            y_rt = np.append(y_rt, int(ev.comment))

    X_offline = epochs_offline.get_data()[:, data_picks, :]
    y_offline = epochs_offline.events[:, 2]
    assert_array_equal(X_rt, X_offline)
    assert_array_equal(y_rt, y_offline)
def test_interpolation_ctf_comp():
    """Test interpolation with compensated CTF data."""
    fname = op.join(testing.data_path(download=False), 'CTF',
                    'somMDYO-18av.ds')
    raw = io.read_raw_ctf(fname, preload=True)
    # Mark two channels bad, interpolate them, and check they are cleared.
    raw.info['bads'] = [raw.ch_names[5], raw.ch_names[-5]]
    raw.interpolate_bads(mode='fast')
    assert raw.info['bads'] == []
def test_io_egi_crop_no_preload():
    """Test crop non-preloaded EGI MFF data (BUG)."""
    fname = op.join(data_path(), 'EGI', 'test_egi.mff')
    # Crop-then-load on a lazy reader must equal the preloaded result.
    raw_lazy = read_raw_egi(fname, preload=False)
    raw_lazy.crop(17.5, 20.5)
    raw_lazy.load_data()
    raw_eager = read_raw_egi(fname, preload=True)
    raw_eager.crop(17.5, 20.5)
    raw_eager.load_data()
    assert_allclose(raw_lazy._data, raw_eager._data)
def test_inverse_ctf_comp():
    """Test inverse computation with compensated CTF data."""
    fname = op.join(testing.data_path(download=False), 'CTF',
                    'somMDYO-18av.ds')
    raw = mne.io.read_raw_ctf(fname)
    raw.apply_gradient_compensation(1)
    # Minimal setup: one volume source point inside a sphere model.
    noise_cov = make_ad_hoc_cov(raw.info)
    bem = make_sphere_model()
    src = mne.setup_volume_source_space(
        pos=dict(rr=[[0., 0., 0.01]], nn=[[0., 1., 0.]]))
    fwd = make_forward_solution(raw.info, None, src, bem, eeg=False)
    inv = make_inverse_operator(raw.info, fwd, noise_cov, loose=1.)
    # Smoke test: applying the operator on raw must not raise.
    apply_inverse_raw(raw, inv, 1. / 9.)
def test_io_egi_pns_mff():
    """Test importing EGI MFF with PNS data."""
    egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi_pns.mff')
    raw = read_raw_egi(egi_fname_mff, include=None, preload=True,
                       verbose='error')
    assert ('RawMff' in repr(raw))
    pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True)
    assert_equal(len(pns_chans), 7)
    names = [raw.ch_names[x] for x in pns_chans]
    pns_names = ['Resp. Temperature'[:15],
                 'Resp. Pressure',
                 'ECG',
                 'Body Position',
                 'Resp. Effort Chest'[:15],
                 'Resp. Effort Abdomen'[:15],
                 'EMG-Leg']
    _test_raw_reader(read_raw_egi, input_fname=egi_fname_mff,
                     channel_naming='EEG %03d', verbose='error')
    assert_equal(names, pns_names)
    mat_names = ['Resp_Temperature'[:15],
                 'Resp_Pressure',
                 'ECG',
                 'Body_Position',
                 'Resp_Effort_Chest'[:15],
                 'Resp_Effort_Abdomen'[:15],
                 'EMGLeg']
    # Each PNS channel must match the calibrated MATLAB reference.
    egi_fname_mat = op.join(data_path(), 'EGI', 'test_egi_pns.mat')
    mat = sio.loadmat(egi_fname_mat)
    for name, idx, mat_name in zip(pns_names, pns_chans, mat_names):
        print('Testing {}'.format(name))
        key = next(k for k in mat.keys() if mat_name in k)
        cal = raw.info['chs'][idx]['cal']
        expected = mat[key] * cal
        assert_array_equal(expected, raw[idx][0])
def test_eeglab_event_from_annot():
    """Test all forms of obtaining annotations."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    montage = op.join(base_dir, 'test_chans.locs')
    event_id = {'rt': 1, 'square': 2}
    raw = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                          preload=False)
    # Annotations read directly from disk match those set by the reader.
    annot = read_annotations(raw_fname)
    assert len(raw.annotations) == 154
    raw.set_annotations(annot)
    events, _ = events_from_annotations(raw, event_id=event_id)
    assert len(events) == 154
def test_plot_ctf():
    """Test plotting of CTF evoked."""
    fname = op.join(testing.data_path(download=False), 'CTF',
                    'testdata_ctf.ds')
    raw = mne.io.read_raw_ctf(fname, preload=True)
    events = np.array([[200, 0, 1]])
    tmin, tmax = -0.1, 0.5  # start and end of an epoch in sec.
    # Subsample channels and decimate heavily to keep the test fast.
    picks = mne.pick_types(raw.info, meg=True, stim=True, eog=True,
                           ref_meg=True, exclude='bads')[::20]
    epochs = mne.Epochs(raw, events, 1, tmin, tmax, proj=True,
                        picks=picks, preload=True, decim=10,
                        verbose='error')
    evoked = epochs.average()
    # Smoke tests: these must render without raising.
    evoked.plot_joint(times=[0.1])
    mne.viz.plot_compare_evokeds([evoked, evoked])
def test_maxwell_filter_additional(): """Test processing of Maxwell filtered data""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = 'test_move_anon' raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif') with warnings.catch_warnings(record=True): # maxshield # Use 2.0 seconds of data to get stable cov. estimate raw = Raw(raw_fname, preload=False, proj=False, allow_maxshield=True).crop(0., 2., False) # Get MEG channels, compute Maxwell filtered data raw.load_data() raw.pick_types(meg=True, eeg=False) int_order, ext_order = 8, 3 raw_sss = maxwell.maxwell_filter(raw, int_order=int_order, ext_order=ext_order) # Test io on processed data tempdir = _TempDir() test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = Raw(test_outname, preload=True, proj=False, allow_maxshield=True) # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded._data[:, :], raw_sss._data[:, :], rtol=1e-6, atol=1e-20) # Test rank of covariance matrices for raw and SSS processed data cov_raw = compute_raw_covariance(raw) cov_sss = compute_raw_covariance(raw_sss) scalings = None cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings) cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info, scalings) assert_equal(cov_raw_rank, raw.info['nchan']) assert_equal(cov_sss_rank, maxwell.get_num_moments(int_order, 0))
def test_add_noise():
    """Test noise addition."""
    rng = np.random.RandomState(0)
    data_path = testing.data_path()
    raw = read_raw_fif(data_path + '/MEG/sample/sample_audvis_trunc_raw.fif')
    raw.del_proj()
    picks = pick_types(raw.info, eeg=True, exclude=())
    cov = compute_raw_covariance(raw, picks=picks)
    # Input validation: raw must be preloaded, types must be correct
    with pytest.raises(RuntimeError, match='to be loaded'):
        add_noise(raw, cov)
    raw.crop(0, 1).load_data()
    with pytest.raises(TypeError, match='Raw, Epochs, or Evoked'):
        add_noise(0., cov)
    with pytest.raises(TypeError, match='Covariance'):
        add_noise(raw, 0.)
    # test a no-op (data preserved): zero covariance adds zero noise
    orig_data = raw[:][0]
    zero_cov = cov.copy()
    zero_cov['data'].fill(0)
    add_noise(raw, zero_cov)
    new_data = raw[:][0]
    assert_allclose(orig_data, new_data, atol=1e-30)
    # set to zero to make comparisons easier
    raw._data[:] = 0.
    epochs = EpochsArray(np.zeros((1, len(raw.ch_names), 100)),
                         raw.info.copy())
    epochs.info['bads'] = []
    evoked = epochs.average(picks=np.arange(len(raw.ch_names)))
    for inst in (raw, epochs, evoked):
        with catch_logging() as log:
            add_noise(inst, cov, random_state=rng, verbose=True)
        log = log.getvalue()
        # Log must report how many channels received noise
        want = ('to {0}/{1} channels ({0}'
                .format(len(cov['names']), len(raw.ch_names)))
        assert want in log
        if inst is evoked:
            inst = EpochsArray(inst.data[np.newaxis], inst.info)
        if inst is raw:
            cov_new = compute_raw_covariance(inst, picks=picks,
                                             verbose='error')  # samples
        else:
            cov_new = compute_covariance(inst, verbose='error')  # avg ref
        # Covariance of the added noise should correlate strongly with
        # the covariance it was drawn from
        assert cov['names'] == cov_new['names']
        r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1]
        assert r > 0.99
def test_lcmv_ctf_comp():
    """Test LCMV beamformer fitting with compensated CTF data."""
    fname = op.join(testing.data_path(download=False), 'CTF',
                    'somMDYO-18av.ds')
    raw = mne.io.read_raw_ctf(fname, preload=True)
    events = mne.make_fixed_length_events(raw, duration=0.2)[:2]
    epochs = mne.Epochs(raw, events, tmin=0., tmax=0.2)
    evoked = epochs.average()
    # Only two short epochs -> the covariance estimate warns.
    with pytest.warns(RuntimeWarning,
                      match='Too few samples .* estimate may be unreliable'):
        data_cov = mne.compute_covariance(epochs)
    src = mne.setup_volume_source_space(pos=15.0)
    sphere = mne.make_sphere_model()
    fwd = mne.make_forward_solution(evoked.info, None, src, sphere)
    filters = mne.beamformer.make_lcmv(evoked.info, fwd, data_cov)
    assert 'weights' in filters
def generate_data_for_comparing_against_eeglab_infomax(ch_type,
                                                       random_state):
    """Generate data."""
    data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
    raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
    raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)

    if ch_type == 'eeg':
        picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    else:
        picks = pick_types(raw.info, meg=ch_type, eeg=False,
                           exclude='bads')

    # select a small number of channels for the test
    number_of_channels_to_use = 5
    idx_perm = random_permutation(picks.shape[0], random_state)
    picks = picks[idx_perm[:number_of_channels_to_use]]

    with warnings.catch_warnings(record=True):  # deprecated params
        raw.filter(1, 45, picks=picks)
        # Eventually we will need to add these, but for now having none of
        # them is a nice deprecation sanity check.
        #            filter_length='10s',
        #            l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
        #            phase='zero-double', fir_window='hann')  # use the old way

    # keep every 20th sample to keep the test fast
    X = raw[picks, :][0][:, ::20]

    # Subtract the mean
    mean_X = X.mean(axis=1)
    X -= mean_X[:, None]

    # pre_whitening: z-score
    X /= np.std(X)

    T = X.shape[1]
    cov_X = np.dot(X, X.T) / T

    # Let's whiten the data: W = U D^{-1/2} U.T (symmetric whitener)
    U, D, _ = svd(cov_X)
    W = np.dot(U, U.T / np.sqrt(D)[:, None])
    Y = np.dot(W, X)

    return Y
def test_maxwell_filter_additional(): """Test processing of Maxwell filtered data.""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = 'test_move_anon' raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif') # Use 2.0 seconds of data to get stable cov. estimate raw = read_crop(raw_fname, (0., 2.)) # Get MEG channels, compute Maxwell filtered data raw.load_data() raw.pick_types(meg=True, eeg=False) int_order = 8 raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None, bad_condition='ignore') # Test io on processed data tempdir = _TempDir() test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = read_crop(test_outname).load_data() # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0], rtol=1e-6, atol=1e-20) # Test rank of covariance matrices for raw and SSS processed data cov_raw = compute_raw_covariance(raw) cov_sss = compute_raw_covariance(raw_sss) scalings = None cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings) cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info, scalings) assert_equal(cov_raw_rank, raw.info['nchan']) assert_equal(cov_sss_rank, _get_n_moments(int_order))
def test_eeglab_event_from_annot(recwarn):
    """Test all forms of obtaining annotations."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname_mat = op.join(base_dir, 'test_raw.set')
    raw_fname = raw_fname_mat
    montage = op.join(base_dir, 'test_chans.locs')
    event_id = {'rt': 1, 'square': 2}
    raw1 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
                           event_id=event_id, preload=False)
    # Three ways of getting the same events: from the synthesized stim
    # channel, from the file directly, and via annotations.
    events_a = find_events(raw1)
    events_b = read_events_eeglab(raw_fname, event_id=event_id)
    annotations = read_annotations_eeglab(raw_fname)
    assert raw1.annotations is None
    raw1.set_annotations(annotations)
    events_c, _ = events_from_annotations(raw1, event_id=event_id)
    # All three must agree.
    assert_array_equal(events_a, events_b)
    assert_array_equal(events_a, events_c)
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull"""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    subjects_dir = op.join(data_path, 'subjects')
    fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
    fname_trans = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-trans.fif')
    fname_bem = op.join(subjects_dir, 'sample', 'bem',
                        'sample-1280-1280-1280-bem-sol.fif')
    subject = 'sample'
    raw = Raw(raw_fname, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map: a small patch of active electrodes
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1
    simulated_scalp_map = simulated_scalp_map[:, None]
    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    dip, residual = fit_dipole(evoked, cov, fname_bem, fname_trans,
                               min_dist=min_dist)
    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    # The fitted dipole must respect the requested minimum distance
    # from the inner skull (dist is in meters, min_dist in mm).
    assert_true(min_dist < (dist[0] * 1000.) < (min_dist + 1.))

    # Negative min_dist is invalid
    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem,
                  fname_trans, -1.)
def test_plot_ctf():
    """Test plotting of CTF evoked."""
    ctf_dir = op.join(testing.data_path(download=False), 'CTF')
    raw_fname = op.join(ctf_dir, 'testdata_ctf.ds')
    raw = mne.io.read_raw_ctf(raw_fname, preload=True)
    events = np.array([[200, 0, 1]])
    event_id = 1
    tmin, tmax = -0.1, 0.5  # start and end of an epoch in sec.
    # Subsample channels and decimate to keep the test fast
    picks = mne.pick_types(raw.info, meg=True, stim=True, eog=True,
                           ref_meg=True, exclude='bads')[::20]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, preload=True, decim=10,
                        verbose='error')
    evoked = epochs.average()
    evoked.plot_joint(times=[0.1])
    mne.viz.plot_compare_evokeds([evoked, evoked])

    # make sure axes position is "almost" unchanged
    # when axes were passed to plot_joint by the user
    times = [0.1, 0.2, 0.3]
    fig = plt.figure()

    # create custom axes for topomaps, colorbar and the timeseries
    gs = gridspec.GridSpec(3, 7, hspace=0.5, top=0.8)
    topo_axes = [fig.add_subplot(gs[0, idx * 2:(idx + 1) * 2])
                 for idx in range(len(times))]
    topo_axes.append(fig.add_subplot(gs[0, -1]))
    ts_axis = fig.add_subplot(gs[1:, 1:-1])

    def get_axes_midpoints(axes):
        # Midpoint of each axis; the last entry (colorbar) is skipped.
        midpoints = list()
        for ax in axes[:-1]:
            pos = ax.get_position()
            midpoints.append([pos.x0 + (pos.width * 0.5),
                              pos.y0 + (pos.height * 0.5)])
        return np.array(midpoints)

    midpoints_before = get_axes_midpoints(topo_axes)
    evoked.plot_joint(times=times, ts_args={'axes': ts_axis},
                      topomap_args={'axes': topo_axes}, title=None)
    midpoints_after = get_axes_midpoints(topo_axes)
    assert (np.linalg.norm(midpoints_before - midpoints_after) < 0.1).all()
def test_maxwell_filter_additional(): """Test processing of Maxwell filtered data""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = "test_move_anon" raw_fname = op.join(data_path, "SSS", file_name + "_raw.fif") with warnings.catch_warnings(record=True): # maxshield # Use 2.0 seconds of data to get stable cov. estimate raw = Raw(raw_fname, allow_maxshield=True).crop(0.0, 2.0, False) # Get MEG channels, compute Maxwell filtered data raw.load_data() raw.pick_types(meg=True, eeg=False) int_order = 8 raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None, bad_condition="ignore") # Test io on processed data tempdir = _TempDir() test_outname = op.join(tempdir, "test_raw_sss.fif") raw_sss.save(test_outname) raw_sss_loaded = Raw(test_outname, preload=True) # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0], rtol=1e-6, atol=1e-20) # Test rank of covariance matrices for raw and SSS processed data cov_raw = compute_raw_covariance(raw) cov_sss = compute_raw_covariance(raw_sss) scalings = None cov_raw_rank = _estimate_rank_meeg_cov(cov_raw["data"], raw.info, scalings) cov_sss_rank = _estimate_rank_meeg_cov(cov_sss["data"], raw_sss.info, scalings) assert_equal(cov_raw_rank, raw.info["nchan"]) assert_equal(cov_sss_rank, _get_n_moments(int_order))
def test_1020_selection():
    """Test making a 10/20 selection dict."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    loc_fname = op.join(base_dir, 'test_chans.locs')
    raw = read_raw_eeglab(raw_fname, montage=loc_fname)

    # invalid input types must raise
    for input in ("a_string", 100, raw, [1, 2]):
        pytest.raises(TypeError, make_1020_channel_selections, input)

    sels = make_1020_channel_selections(raw.info)
    # NOTE(review): the assertion below (fs > ps) requires the first
    # frontal ("F*") pick to come AFTER the last occipital ("O*") pick,
    # i.e. a back-to-front ordering within each selection — confirm this
    # is the intended sort direction of make_1020_channel_selections.
    for name, picks in sels.items():
        fs = min([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("F")])
        ps = max([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("O")])
        assert fs > ps

    # are channels in the correct selection?
    fz_c3_c4 = [raw.ch_names.index(ch) for ch in ("Fz", "C3", "C4")]
    for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")):
        assert channel in sels[roi]
def generate_data_for_comparing_against_eeglab_infomax(ch_type,
                                                       random_state):
    """Generate whitened data for comparing against EEGLAB's infomax."""
    data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
    raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
    raw = Raw(raw_fname, preload=True)

    if ch_type == 'eeg':
        picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    else:
        picks = pick_types(raw.info, meg=ch_type, eeg=False,
                           exclude='bads')

    # select a small number of channels for the test
    number_of_channels_to_use = 5
    idx_perm = random_permutation(picks.shape[0], random_state)
    picks = picks[idx_perm[:number_of_channels_to_use]]

    raw.filter(1, 45, n_jobs=2)
    # keep every 20th sample to keep the test fast
    X = raw[picks, :][0][:, ::20]

    # Subtract the mean
    mean_X = X.mean(axis=1)
    X -= mean_X[:, None]

    # pre_whitening: z-score
    X /= np.std(X)

    T = X.shape[1]
    cov_X = np.dot(X, X.T) / T

    # Let's whiten the data: W = U D^{-1/2} U.T (symmetric whitener)
    U, D, _ = svd(cov_X)
    W = np.dot(U, U.T / np.sqrt(D)[:, None])
    Y = np.dot(W, X)

    return Y
def test_scale_mri_xfm(tmpdir, few_surfaces):
    """Test scale_mri transforms and MRI scaling."""
    # scale fsaverage
    tempdir = str(tmpdir)
    fake_home = testing.data_path()
    # add fsaverage
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    # add sample (with few files)
    sample_dir = op.join(tempdir, 'sample')
    os.mkdir(sample_dir)
    os.mkdir(op.join(sample_dir, 'bem'))
    for dirname in ('mri', 'surf'):
        copytree(op.join(fake_home, 'subjects', 'sample', dirname),
                 op.join(sample_dir, dirname))
    subject_to = 'flachkopf'
    spacing = 'oct2'
    for subject_from in ('fsaverage', 'sample'):
        if subject_from == 'fsaverage':
            scale = 1.  # single dim
        else:
            scale = [0.9, 2, .8]  # separate
        src_from_fname = op.join(tempdir, subject_from, 'bem',
                                 '%s-%s-src.fif' % (subject_from, spacing))
        src_from = mne.setup_source_space(
            subject_from, spacing, subjects_dir=tempdir, add_dist=False)
        write_source_spaces(src_from_fname, src_from)
        vertices_from = np.concatenate([s['vertno'] for s in src_from])
        assert len(vertices_from) == 36
        hemis = ([0] * len(src_from[0]['vertno']) +
                 [1] * len(src_from[0]['vertno']))
        mni_from = mne.vertex_to_mni(vertices_from, hemis, subject_from,
                                     subjects_dir=tempdir)
        if subject_from == 'fsaverage':  # identity transform
            source_rr = np.concatenate(
                [s['rr'][s['vertno']] for s in src_from]) * 1e3
            assert_allclose(mni_from, source_rr)
        # fsaverage ships fiducials; for 'sample' the fiducials file is
        # missing and the destination already exists from the first pass,
        # so both error paths are exercised before a successful scale.
        if subject_from == 'fsaverage':
            overwrite = skip_fiducials = False
        else:
            with pytest.raises(IOError, match='No fiducials file'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir)
            skip_fiducials = True
            with pytest.raises(IOError, match='already exists'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir,
                          skip_fiducials=skip_fiducials)
            overwrite = True
        if subject_from == 'sample':  # support for not needing all surf files
            os.remove(op.join(sample_dir, 'surf', 'lh.curv'))
        scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
                  verbose='debug', overwrite=overwrite,
                  skip_fiducials=skip_fiducials)
        if subject_from == 'fsaverage':
            assert _is_mri_subject(subject_to, tempdir), "Scaling failed"
        src_to_fname = op.join(tempdir, subject_to, 'bem',
                               '%s-%s-src.fif' % (subject_to, spacing))
        assert op.exists(src_to_fname), "Source space was not scaled"
        # Check MRI scaling
        fname_mri = op.join(tempdir, subject_to, 'mri', 'T1.mgz')
        assert op.exists(fname_mri), "MRI was not scaled"
        # Check MNI transform: the scaled subject must map its vertices
        # back to the same MNI coordinates as the original subject.
        src = mne.read_source_spaces(src_to_fname)
        vertices = np.concatenate([s['vertno'] for s in src])
        assert_array_equal(vertices, vertices_from)
        mni = mne.vertex_to_mni(vertices, hemis, subject_to,
                                subjects_dir=tempdir)
        assert_allclose(mni, mni_from, atol=1e-3)  # 0.001 mm
def test_scale_mri(tmpdir, few_surfaces, scale):
    """Test creating fsaverage and scaling it."""
    # create fsaverage using the testing "fsaverage" instead of the
    # FreeSurfer one
    tempdir = str(tmpdir)
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    # update=True must restore a deleted fiducials file
    fid_path = op.join(tempdir, 'fsaverage', 'bem',
                       'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
                                 add_dist=False)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space('fsaverage', pos=50, mri=mri,
                                         subjects_dir=tempdir,
                                         add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage
    write_source_spaces(path % 'ico-0', src, overwrite=True)
    with pytest.warns(None):  # sometimes missing nibabel
        scale_mri('fsaverage', 'flachkopf', scale, True,
                  subjects_dir=tempdir, verbose='debug')
    assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
    spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')

    assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
    assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
                                       'lh.sphere.reg'))
    # src_mri_t of both the original and the scaled volume source space
    # must map voxel indices onto the stored source positions
    vsrc_s = mne.read_source_spaces(spath % 'vol-50')
    for vox in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 2, 3]):
        idx = np.ravel_multi_index(vox, vsrc[0]['shape'], order='F')
        err_msg = f'idx={idx} @ {vox}, scale={scale}'
        assert_allclose(apply_trans(vsrc[0]['src_mri_t'], vox),
                        vsrc[0]['rr'][idx], err_msg=err_msg)
        assert_allclose(apply_trans(vsrc_s[0]['src_mri_t'], vox),
                        vsrc_s[0]['rr'][idx], err_msg=err_msg)
    scale_labels('flachkopf', subjects_dir=tempdir)

    # add distances to source space after hacking the properties to make
    # it run *much* faster
    src_dist = src.copy()
    for s in src_dist:
        s.update(rr=s['rr'][s['vertno']], nn=s['nn'][s['vertno']],
                 tris=s['use_tris'])
        s.update(np=len(s['rr']), ntri=len(s['tris']),
                 vertno=np.arange(len(s['rr'])),
                 inuse=np.ones(len(s['rr']), int))
    mne.add_source_space_distances(src_dist)
    write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
    assert ssrc[0]['nearest'] is not None

    # check patch info computation (only if SciPy is new enough to be fast)
    if check_version('scipy', '1.3'):
        for s in src_dist:
            for key in ('dist', 'dist_limit'):
                s[key] = None
        write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

        # scale with distances
        os.remove(spath % 'ico-0')
        scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
        ssrc = mne.read_source_spaces(spath % 'ico-0')
        assert ssrc[0]['dist'] is None
        assert ssrc[0]['nearest'] is not None
import os.path as op import numpy as np from numpy.testing import assert_allclose, assert_equal import pytest from mne.utils import run_tests_if_main, _TempDir from mne.io import read_raw_artemis123 from mne.io.tests.test_raw import _test_raw_reader from mne.datasets import testing from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs from mne import pick_types from mne.transforms import rot_to_quat, _angle_between_quats artemis123_dir = op.join(testing.data_path(download=False), 'ARTEMIS123') short_HPI_dip_fname = op.join( artemis123_dir, 'Artemis_Data_2017-04-04-15h-44m-' + '22s_Motion_Translation-z.bin') dig_fname = op.join(artemis123_dir, 'Phantom_040417_dig.pos') short_hpi_1kz_fname = op.join( artemis123_dir, 'Artemis_Data_2017-04-14-10h' + '-38m-59s_Phantom_1k_HPI_1s.bin') def _assert_trans(actual, desired, dist_tol=0.003, angle_tol=5.): trans_est = actual[0:3, 3] quat_est = rot_to_quat(actual[0:3, 0:3])
def test_scale_mri():
    """Test creating fsaverage and scaling it.

    End-to-end check of the MRI-scaling pipeline: build an ``fsaverage``
    subject from the testing dataset, create surface and volume source
    spaces for it, scale it (uniformly and non-uniformly) to a new subject
    ``flachkopf``, and verify that the scaled source spaces, surfaces,
    labels, and source-space distances are produced correctly.
    """
    # create fsaverage using the testing "fsaverage" instead of the FreeSurfer
    # one
    tempdir = _TempDir()
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    # removing the fiducials file and re-running with update=True must
    # restore it
    fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files (keep one so label scaling is exercised)
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
                                 add_dist=False)
    write_source_spaces(path % 'ico-0', src)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space(
        'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
        add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage, once uniformly and once per-axis
    for scale in (.9, [1, .2, .8]):
        # Use try/finally so the env var is removed even if scale_mri
        # raises; otherwise it would leak into subsequently-run tests.
        os.environ['_MNE_FEW_SURFACES'] = 'true'
        try:
            scale_mri('fsaverage', 'flachkopf', scale, True,
                      subjects_dir=tempdir, verbose='debug')
        finally:
            del os.environ['_MNE_FEW_SURFACES']
        assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
        spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')

        assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
        assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
                                           'lh.sphere.reg'))
        vsrc_s = mne.read_source_spaces(spath % 'vol-50')
        # scaling a point in the original space must match transforming the
        # scaled point through the scaled subject's src_mri_t
        pt = np.array([0.12, 0.41, -0.22])
        assert_array_almost_equal(
            apply_trans(vsrc_s[0]['src_mri_t'], pt * np.array(scale)),
            apply_trans(vsrc[0]['src_mri_t'], pt))
        scale_labels('flachkopf', subjects_dir=tempdir)

    # add distances to source space
    mne.add_source_space_distances(src)
    src.save(path % 'ico-0', overwrite=True)

    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
def test_notebook_interactive(renderer_notebook, brain_gc, nbexec):
    """Test interactive modes.

    Runs inside a notebook execution context (``nbexec`` fixture): plots a
    source estimate with the 'notebook' 3D backend, clicks every toolbar
    button, and checks that movie/screenshot outputs are written and that
    screenshot dimensions respect the time-viewer layout.
    """
    # imports are local because this body is executed inside the notebook
    # kernel by nbexec, not in the pytest process
    import os
    import tempfile
    from contextlib import contextmanager
    from numpy.testing import assert_allclose
    from ipywidgets import Button
    import matplotlib.pyplot as plt
    import mne
    from mne.datasets import testing
    data_path = testing.data_path()
    sample_dir = os.path.join(data_path, 'MEG', 'sample')
    subjects_dir = os.path.join(data_path, 'subjects')
    fname_stc = os.path.join(sample_dir, 'sample_audvis_trunc-meg')
    stc = mne.read_source_estimate(fname_stc, subject='sample')
    initial_time = 0.13
    mne.viz.set_3d_backend('notebook')
    brain_class = mne.viz.get_brain_class()

    @contextmanager
    def interactive(on):
        # temporarily force matplotlib interactive mode on/off, restoring
        # the previous state on exit
        old = plt.isinteractive()
        plt.interactive(on)
        try:
            yield
        finally:
            plt.interactive(old)

    with interactive(False):
        brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,
                         clim=dict(kind='value', pos_lims=[3, 6, 9]),
                         time_viewer=True,
                         show_traces=True,
                         hemi='lh', size=300)
        assert isinstance(brain, brain_class)
        assert brain._renderer.figure.notebook
        assert brain._renderer.figure.display is not None
        brain._renderer._update()
        # point the movie/screenshot text fields at temp files, then click
        # every Button action and verify the files appear
        tmp_path = tempfile.mkdtemp()
        movie_path = os.path.join(tmp_path, 'test.gif')
        screenshot_path = os.path.join(tmp_path, 'test.png')
        brain._renderer.actions['movie_field'].value = movie_path
        brain._renderer.actions['screenshot_field'].value = screenshot_path
        # every non-'_field' action is expected to be a clickable Button
        total_number_of_buttons = sum(
            '_field' not in k for k in brain._renderer.actions.keys())
        number_of_buttons = 0
        for action in brain._renderer.actions.values():
            if isinstance(action, Button):
                action.click()
                number_of_buttons += 1
        assert number_of_buttons == total_number_of_buttons
        assert os.path.isfile(movie_path)
        assert os.path.isfile(screenshot_path)
        # screenshot without the time viewer is exactly the brain view size
        img_nv = brain.screenshot()
        assert img_nv.shape == (300, 300, 3), img_nv.shape
        # with time_viewer=True the image is taller (traces appended below)
        img_v = brain.screenshot(time_viewer=True)
        assert img_v.shape[1:] == (300, 3), img_v.shape
        # XXX This rtol is not very good, ideally would be zero
        assert_allclose(img_v.shape[0], img_nv.shape[0] * 1.25,
                        err_msg=img_nv.shape, rtol=0.1)
    brain.close()
def test_fif(_bids_validate):
    """Test functionality of the write_raw_bids conversion for fif.

    Exercises the FIF -> BIDS round trip end to end: writing, reading back,
    EEG-only conversion to BrainVision, empty-room naming, participant
    metadata overwrite semantics, anonymization (``daysback``), split-file
    naming, and a series of expected-error paths.
    """
    output_path = _TempDir()
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=False)

    # Read the file back in to check that the data has come through cleanly.
    # Events and bad channel information was read through JSON sidecar files.
    with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
        read_raw_bids(bids_basename + '_meg.fif', output_path,
                      extra_params=dict(foo='bar'))

    raw2 = read_raw_bids(bids_basename + '_meg.fif', output_path,
                         extra_params=dict(allow_maxshield=True))
    assert set(raw.info['bads']) == set(raw2.info['bads'])
    events, _ = mne.events_from_annotations(raw2)
    events2 = mne.read_events(events_fname)
    # drop zero-valued events before comparing onsets
    events2 = events2[events2[:, 2] != 0]
    assert_array_equal(events2[:, 0], events[:, 0])

    # check if write_raw_bids works when there is no stim channel
    raw.set_channel_types({raw.ch_names[i]: 'misc'
                           for i in
                           mne.pick_types(raw.info, stim=True, meg=False)})
    output_path = _TempDir()
    with pytest.warns(UserWarning, match='No events found or provided.'):
        write_raw_bids(raw, bids_basename, output_path, overwrite=False)

    _bids_validate(output_path)

    # try with eeg data only (conversion to bv)
    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    raw.load_data()
    raw2 = raw.pick_types(meg=False, eeg=True, stim=True,
                          eog=True, ecg=True)
    raw2.save(op.join(output_path, 'test-raw.fif'), overwrite=True)
    raw2 = mne.io.Raw(op.join(output_path, 'test-raw.fif'), preload=False)
    with pytest.warns(UserWarning,
                      match='Converting data files to BrainVision format'):
        write_raw_bids(raw2, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       verbose=True, overwrite=False)
    os.remove(op.join(output_path, 'test-raw.fif'))
    bids_dir = op.join(output_path, 'sub-%s' % subject_id,
                       'ses-%s' % session_id, 'eeg')
    # all BrainVision sidecar files must exist after conversion
    for sidecar in ['channels.tsv', 'eeg.eeg', 'eeg.json', 'eeg.vhdr',
                    'eeg.vmrk', 'events.tsv']:
        assert op.isfile(op.join(bids_dir, bids_basename + '_' + sidecar))

    # the converted BrainVision data must match the original numerically
    raw2 = mne.io.read_raw_brainvision(op.join(bids_dir,
                                               bids_basename + '_eeg.vhdr'))
    assert_array_almost_equal(raw.get_data(), raw2.get_data())
    _bids_validate(output_path)

    # write the same data but pretend it is empty room data:
    raw = mne.io.read_raw_fif(raw_fname)
    # NOTE(review): meas_date here is the old (sec, usec) tuple form and
    # fromtimestamp() uses the local timezone — presumably fine for this
    # mne version; verify if the test becomes date-sensitive.
    er_date = datetime.fromtimestamp(
        raw.info['meas_date'][0]).strftime('%Y%m%d')
    er_bids_basename = 'sub-emptyroom_ses-{0}_task-noise'.format(str(er_date))
    write_raw_bids(raw, er_bids_basename, output_path, overwrite=False)
    assert op.exists(op.join(
        output_path, 'sub-emptyroom', 'ses-{0}'.format(er_date), 'meg',
        'sub-emptyroom_ses-{0}_task-noise_meg.json'.format(er_date)))
    # test that an incorrect date raises an error.
    er_bids_basename_bad = 'sub-emptyroom_ses-19000101_task-noise'
    with pytest.raises(ValueError, match='Date provided'):
        write_raw_bids(raw, er_bids_basename_bad, output_path,
                       overwrite=False)

    # give the raw object some fake participant data (potentially overwriting)
    raw = mne.io.read_raw_fif(raw_fname)
    raw.info['subject_info'] = {'his_id': subject_id2,
                                'birthday': (1993, 1, 26), 'sex': 1}
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=True)
    # assert age of participant is correct
    participants_tsv = op.join(output_path, 'participants.tsv')
    data = _from_tsv(participants_tsv)
    assert data['age'][data['participant_id'].index('sub-01')] == '9'

    # try and write preloaded data
    raw = mne.io.read_raw_fif(raw_fname, preload=True)
    with pytest.raises(ValueError, match='preloaded'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname, event_id=event_id,
                       overwrite=False)

    # test anonymize
    raw = mne.io.read_raw_fif(raw_fname)
    raw.anonymize()

    data_path2 = _TempDir()
    raw_fname2 = op.join(data_path2, 'sample_audvis_raw.fif')
    raw.save(raw_fname2)

    bids_basename2 = bids_basename.replace(subject_id, subject_id2)
    raw = mne.io.read_raw_fif(raw_fname2)
    bids_output_path = write_raw_bids(raw, bids_basename2, output_path,
                                      events_data=events_fname,
                                      event_id=event_id, overwrite=False)

    # check that the overwrite parameters work correctly for the participant
    # data
    # change the gender but don't force overwrite.
    raw.info['subject_info'] = {'his_id': subject_id2,
                                'birthday': (1994, 1, 26), 'sex': 2}
    with pytest.raises(FileExistsError, match="already exists"):  # noqa: F821
        write_raw_bids(raw, bids_basename2, output_path,
                       events_data=events_fname, event_id=event_id,
                       overwrite=False)
    # now force the overwrite
    write_raw_bids(raw, bids_basename2, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=True)

    with pytest.raises(ValueError, match='raw_file must be'):
        write_raw_bids('blah', bids_basename, output_path)

    # basename with an unknown entity
    bids_basename2 = 'sub-01_ses-01_xyz-01_run-01'
    with pytest.raises(KeyError, match='Unexpected entity'):
        write_raw_bids(raw, bids_basename2, output_path)

    # basename with entities in the wrong order
    bids_basename2 = 'sub-01_run-01_task-auditory'
    with pytest.raises(ValueError, match='ordered correctly'):
        write_raw_bids(raw, bids_basename2, output_path, overwrite=True)

    del raw._filenames
    with pytest.raises(ValueError, match='raw.filenames is missing'):
        write_raw_bids(raw, bids_basename2, output_path)

    _bids_validate(output_path)

    assert op.exists(op.join(output_path, 'participants.tsv'))

    # asserting that single fif files do not include the part key
    # NOTE(review): `ii` is only bound if `files` is non-empty; an empty
    # glob result would raise NameError on the assert below — confirm a
    # single .fif is always written here.
    files = glob(op.join(bids_output_path, 'sub-' + subject_id2,
                         'ses-' + subject_id2, 'meg', '*.fif'))
    for ii, FILE in enumerate(files):
        assert 'part' not in FILE
    assert ii < 1

    # test keyword mne-bids anonymize
    raw = mne.io.read_raw_fif(raw_fname)
    with pytest.raises(ValueError, match='`daysback` argument required'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname,
                       event_id=event_id,
                       anonymize=dict(),
                       overwrite=True)

    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    with pytest.warns(UserWarning, match='daysback` is too small'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname,
                       event_id=event_id,
                       anonymize=dict(daysback=400),
                       overwrite=False)

    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    with pytest.raises(ValueError, match='`daysback` exceeds maximum value'):
        write_raw_bids(raw, bids_basename, output_path,
                       events_data=events_fname,
                       event_id=event_id,
                       anonymize=dict(daysback=40000),
                       overwrite=False)

    # a valid daysback must shift the acquisition time before 1925
    output_path = _TempDir()
    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path,
                   events_data=events_fname,
                   event_id=event_id,
                   anonymize=dict(daysback=30000, keep_his=True),
                   overwrite=False)
    scans_tsv = make_bids_basename(
        subject=subject_id, session=session_id, suffix='scans.tsv',
        prefix=op.join(output_path, 'sub-01', 'ses-01'))
    data = _from_tsv(scans_tsv)
    assert datetime.strptime(data['acq_time'][0],
                             '%Y-%m-%dT%H:%M:%S').year < 1925
    _bids_validate(output_path)

    # check that split files have part key
    raw = mne.io.read_raw_fif(raw_fname)
    data_path3 = _TempDir()
    raw_fname3 = op.join(data_path3, 'sample_audvis_raw.fif')
    # force splitting by using a tiny split size
    raw.save(raw_fname3, buffer_size_sec=1.0, split_size='10MB',
             split_naming='neuromag', overwrite=True)
    raw = mne.io.read_raw_fif(raw_fname3)
    subject_id3 = '03'
    bids_basename3 = bids_basename.replace(subject_id, subject_id3)
    bids_output_path = write_raw_bids(raw, bids_basename3, output_path,
                                      overwrite=False)
    files = glob(op.join(bids_output_path, 'sub-' + subject_id3,
                         'ses-' + subject_id3, 'meg', '*.fif'))
    for FILE in files:
        assert 'part' in FILE

    # test unknown extention
    raw = mne.io.read_raw_fif(raw_fname)
    raw._filenames = (raw.filenames[0].replace('.fif', '.foo'),)
    with pytest.raises(ValueError, match='Unrecognized file format'):
        write_raw_bids(raw, bids_basename, output_path)
from mne.utils import run_tests_if_main from mne.event import (define_target_events, merge_events, AcqParserFIF, shift_time_events) from mne.datasets import testing base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') fname = op.join(base_dir, 'test-eve.fif') fname_raw = op.join(base_dir, 'test_raw.fif') fname_gz = op.join(base_dir, 'test-eve.fif.gz') fname_1 = op.join(base_dir, 'test-1-eve.fif') fname_txt = op.join(base_dir, 'test-eve.eve') fname_txt_1 = op.join(base_dir, 'test-eve-1.eve') fname_c_annot = op.join(base_dir, 'test_raw-annot.fif') # for testing Elekta averager elekta_base_dir = op.join(testing.data_path(download=False), 'misc') fname_raw_elekta = op.join(elekta_base_dir, 'test_elekta_3ch_raw.fif') fname_ave_elekta = op.join(elekta_base_dir, 'test_elekta-ave.fif') # using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve: fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve') fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve') raw_fname = op.join(base_dir, 'test_raw.fif') def test_fix_stim(): """Test fixing stim STI016 for Neuromag.""" raw = read_raw_fif(raw_fname, preload=True) # 32768 (016) + 3 (002+001) bits gets incorrectly coded during acquisition raw._data[raw.ch_names.index('STI 014'), :3] = [0, -32765, 0] with pytest.warns(RuntimeWarning, match='STI016'):
kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/' raws = { 'Neuromag': read_raw_fif(sample.data_path() + '/MEG/sample/sample_audvis_raw.fif'), 'CTF 275': read_raw_ctf(spm_face.data_path() + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D.ds'), 'Magnes 3600wh': read_raw_bti(op.join(bti_path, 'test_pdf_linux'), op.join(bti_path, 'test_config_linux'), op.join(bti_path, 'test_hs_linux')), 'KIT': read_raw_kit(op.join(kit_path, 'test.sqd')), 'Artemis123': read_raw_artemis123( op.join(testing.data_path(), 'ARTEMIS123', 'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin')), } for system, raw in sorted(raws.items()): meg = ['helmet', 'sensors'] # We don't have coil definitions for KIT refs, so exclude them if system != 'KIT': meg.append('ref') fig = plot_alignment(raw.info, trans=None, dig=False, eeg=False, surfaces=[], meg=meg, coord_frame='meg',
def test_scale_mri():
    """Test creating fsaverage and scaling it.

    Builds an ``fsaverage`` subject from the testing data, creates surface
    and volume source spaces, scales the subject to ``flachkopf`` with both
    a uniform and a per-axis scale factor, and finally checks that source
    spaces saved with distances are scaled with their distances intact.
    """
    # create fsaverage using the testing "fsaverage" instead of the FreeSurfer
    # one
    tempdir = _TempDir()
    fake_home = testing.data_path()
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"

    # removing the fiducials file and re-running with update=True must
    # restore it
    fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir,
                           fs_home=fake_home)
    assert op.exists(fid_path), "Updating fsaverage"

    # copy MRI file from sample data (shouldn't matter that it's incorrect,
    # so here choose a small one)
    path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
                        'T1.mgz')
    path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    copyfile(path_from, path_to)

    # remove redundant label files (keep one so label scaling is exercised)
    label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
    label_paths = glob(label_temp)
    for label_path in label_paths[1:]:
        os.remove(label_path)

    # create source space
    print('Creating surface source space')
    path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
    src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
                                 add_dist=False)
    mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
    print('Creating volume source space')
    vsrc = mne.setup_volume_source_space(
        'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
        add_interpolator=False)
    write_source_spaces(path % 'vol-50', vsrc)

    # scale fsaverage, once uniformly and once per-axis
    for scale in (.9, [1, .2, .8]):
        write_source_spaces(path % 'ico-0', src, overwrite=True)
        os.environ['_MNE_FEW_SURFACES'] = 'true'
        # NOTE(review): pytest.warns(None) is deprecated/removed in newer
        # pytest — presumably fine for the pytest version this file targets.
        with pytest.warns(None):  # sometimes missing nibabel
            scale_mri('fsaverage', 'flachkopf', scale, True,
                      subjects_dir=tempdir, verbose='debug')
        del os.environ['_MNE_FEW_SURFACES']
        assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
        spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
        assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
        assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
                                           'lh.sphere.reg'))
        vsrc_s = mne.read_source_spaces(spath % 'vol-50')
        # a point scaled then passed through the scaled subject's src_mri_t
        # must land where the unscaled point lands in the original space
        pt = np.array([0.12, 0.41, -0.22])
        assert_array_almost_equal(
            apply_trans(vsrc_s[0]['src_mri_t'], pt * np.array(scale)),
            apply_trans(vsrc[0]['src_mri_t'], pt))
        scale_labels('flachkopf', subjects_dir=tempdir)

    # add distances to source space after hacking the properties to make
    # it run *much* faster
    src_dist = src.copy()
    for s in src_dist:
        # restrict each hemisphere to its used vertices only
        s.update(rr=s['rr'][s['vertno']], nn=s['nn'][s['vertno']],
                 tris=s['use_tris'])
        s.update(np=len(s['rr']), ntri=len(s['tris']),
                 vertno=np.arange(len(s['rr'])),
                 inuse=np.ones(len(s['rr']), int))
    mne.add_source_space_distances(src_dist)
    write_source_spaces(path % 'ico-0', src_dist, overwrite=True)

    # scale with distances
    os.remove(spath % 'ico-0')
    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
    ssrc = mne.read_source_spaces(spath % 'ico-0')
    assert ssrc[0]['dist'] is not None
import numpy as np from numpy import array_equal from numpy.testing import assert_allclose, assert_array_equal, assert_equal import pytest from mne import pick_types # from mne.tests.common import assert_dig_allclose from mne.transforms import apply_trans from mne.io import read_raw_fif, read_raw_ctf from mne.io.compensator import get_current_comp from mne.io.tests.test_raw import _test_raw_reader from mne.utils import _TempDir, run_tests_if_main, _clean_names, catch_logging from mne.datasets import testing, spm_face from mne.io.constants import FIFF ctf_dir = op.join(testing.data_path(download=False), 'CTF') ctf_fname_continuous = 'testdata_ctf.ds' ctf_fname_1_trial = 'testdata_ctf_short.ds' ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' ctf_fname_somato = 'somMDYO-18av.ds' ctf_fname_catch = 'catch-alp-good-f.ds' block_sizes = { ctf_fname_continuous: 12000, ctf_fname_1_trial: 4801, ctf_fname_2_trials: 12000, ctf_fname_discont: 1201, ctf_fname_somato: 313, ctf_fname_catch: 2500, }
from nose.tools import assert_true, assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_allclose from mne import pick_channels, pick_types, Evoked, Epochs, read_events from mne.epochs import _BaseEpochs from mne.io.constants import FIFF from mne.io import (set_eeg_reference, set_bipolar_reference, add_reference_channels) from mne.io.proj import _has_eeg_average_ref_proj from mne.io.reference import _apply_reference from mne.datasets import testing from mne.io import Raw warnings.simplefilter('always') # enable b/c these tests throw warnings data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample') fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif') eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif') ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif') def _test_reference(raw, reref, ref_data, ref_from): """Helper function to test whether a reference has been correctly applied.""" # Separate EEG channels from other channel types picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads') picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True, stim=True, exclude='bads') # Calculate indices of reference channesl picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
def test_find_empty_room(return_bids_test_dir, tmp_path):
    """Test reading of empty room data.

    Covers ``BIDSPath.find_empty_room``: no empty-room present, exact-date
    match, best match among several sessions, error conditions (unset root,
    missing measurement date), and the ``AssociatedEmptyRoom`` sidecar key
    taking precedence over date proximity.
    """
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')
    bids_root = tmp_path / "bids"
    bids_root.mkdir()
    tmp_dir = tmp_path / "tmp"
    tmp_dir.mkdir()

    raw = _read_raw_fif(raw_fname)
    bids_path = BIDSPath(subject='01', session='01',
                         task='audiovisual', run='01',
                         root=bids_root, suffix='meg')
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)

    # No empty-room data present.
    er_basename = bids_path.find_empty_room()
    assert er_basename is None

    # Now create data resembling an empty-room recording.
    # The testing data has no "noise" recording, so save the actual data
    # as named as if it were noise. We first need to write the FIFF file
    # before reading it back in.
    er_raw_fname = op.join(tmp_dir, 'ernoise_raw.fif')
    raw.copy().crop(0, 10).save(er_raw_fname, overwrite=True)
    er_raw = _read_raw_fif(er_raw_fname)

    if not isinstance(er_raw.info['meas_date'], datetime):  # pragma: no cover
        # mne < v0.20 stored meas_date as a (sec, usec) tuple
        er_date = datetime.fromtimestamp(er_raw.info['meas_date'][0])
    else:
        er_date = er_raw.info['meas_date']

    er_date = er_date.strftime('%Y%m%d')
    er_bids_path = BIDSPath(subject='emptyroom', task='noise',
                            session=er_date, suffix='meg', root=bids_root)
    write_raw_bids(er_raw, er_bids_path, overwrite=True, verbose=False)

    # exact-date empty-room recording should be recovered
    recovered_er_bids_path = bids_path.find_empty_room()
    assert er_bids_path == recovered_er_bids_path

    # assert that we get best emptyroom if there are multiple available
    sh.rmtree(op.join(bids_root, 'sub-emptyroom'))
    dates = ['20021204', '20021201', '20021001']
    for date in dates:
        er_bids_path.update(session=date)
        er_meas_date = datetime.strptime(date, '%Y%m%d')
        er_meas_date = er_meas_date.replace(tzinfo=timezone.utc)
        er_raw.set_meas_date(er_meas_date)
        write_raw_bids(er_raw, er_bids_path, verbose=False)

    best_er_basename = bids_path.find_empty_room()
    assert best_er_basename.session == '20021204'

    with pytest.raises(ValueError,
                       match='The root of the "bids_path" must be set'):
        bids_path.copy().update(root=None).find_empty_room()

    # assert that we get an error if meas_date is not available.
    raw = read_raw_bids(bids_path=bids_path)
    raw.set_meas_date(None)
    anonymize_info(raw.info)
    write_raw_bids(raw, bids_path, overwrite=True)
    with pytest.raises(ValueError, match='The provided recording does not '
                                         'have a measurement date set'):
        bids_path.find_empty_room()

    # test that the `AssociatedEmptyRoom` key in MEG sidecar is respected
    bids_root = tmp_path / 'associated-empty-room'
    bids_root.mkdir()
    raw = _read_raw_fif(raw_fname)
    meas_date = datetime(year=2020, month=1, day=10, tzinfo=timezone.utc)
    er_date = datetime(year=2010, month=1, day=1, tzinfo=timezone.utc)
    raw.set_meas_date(meas_date)

    er_raw_matching_date = er_raw.copy().set_meas_date(meas_date)
    er_raw_associated = er_raw.copy().set_meas_date(er_date)

    # First write empty-room data
    # We write two empty-room recordings: one with a date matching exactly the
    # experimental measurement date, and one dated approx. 10 years earlier
    # We will want to enforce using the older recording via
    # `AssociatedEmptyRoom` (without AssociatedEmptyRoom, find_empty_room()
    # would return the recording with the matching date instead)
    er_matching_date_bids_path = BIDSPath(subject='emptyroom',
                                          session='20200110', task='noise',
                                          root=bids_root, datatype='meg',
                                          suffix='meg', extension='.fif')
    write_raw_bids(er_raw_matching_date, bids_path=er_matching_date_bids_path)

    er_associated_bids_path = (er_matching_date_bids_path.copy().update(
        session='20100101'))
    write_raw_bids(er_raw_associated, bids_path=er_associated_bids_path)

    # Now we write experimental data and associate it with the earlier
    # empty-room recording
    bids_path = (er_matching_date_bids_path.copy().update(subject='01',
                                                          session=None,
                                                          task='task'))
    write_raw_bids(raw, bids_path=bids_path,
                   empty_room=er_associated_bids_path)

    # Retrieve empty-room BIDSPath
    assert bids_path.find_empty_room() == er_associated_bids_path

    # Should only work for MEG
    with pytest.raises(ValueError, match='only supported for MEG'):
        bids_path.copy().update(datatype='eeg').find_empty_room()

    # Don't create `AssociatedEmptyRoom` entry in sidecar – we should now
    # retrieve the empty-room recording closer in time
    write_raw_bids(raw, bids_path=bids_path, empty_room=None, overwrite=True)
    assert bids_path.find_empty_room() == er_matching_date_bids_path

    # If we enforce searching only via `AssociatedEmptyRoom`, we should get no
    # result
    assert bids_path.find_empty_room(use_sidecar_only=True) is None
import numpy as np from nose.tools import assert_raises, assert_true, assert_false from numpy.testing import assert_allclose, assert_array_equal, assert_equal import pytest from mne import pick_types # from mne.tests.common import assert_dig_allclose from mne.transforms import apply_trans from mne.io import read_raw_fif, read_raw_ctf from mne.io.tests.test_raw import _test_raw_reader from mne.utils import _TempDir, run_tests_if_main from mne.datasets import testing, spm_face from mne.io.constants import FIFF ctf_dir = op.join(testing.data_path(download=False), 'CTF') ctf_fname_continuous = 'testdata_ctf.ds' ctf_fname_1_trial = 'testdata_ctf_short.ds' ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' ctf_fname_somato = 'somMDYO-18av.ds' ctf_fname_catch = 'catch-alp-good-f.ds' block_sizes = { ctf_fname_continuous: 12000, ctf_fname_1_trial: 4801, ctf_fname_2_trials: 12000, ctf_fname_discont: 1201, ctf_fname_somato: 313, ctf_fname_catch: 2500, }
import warnings import numpy as np from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_equal) import pytest from scipy import io from mne import write_events, read_epochs_eeglab, Epochs, find_events from mne.io import read_raw_eeglab from mne.io.tests.test_raw import _test_raw_reader from mne.io.eeglab.eeglab import read_events_eeglab from mne.io.eeglab import read_annotations_eeglab from mne.datasets import testing from mne.utils import _TempDir, run_tests_if_main base_dir = op.join(testing.data_path(download=False), 'EEGLAB') raw_fname = op.join(base_dir, 'test_raw.set') raw_fname_onefile = op.join(base_dir, 'test_raw_onefile.set') epochs_fname = op.join(base_dir, 'test_epochs.set') epochs_fname_onefile = op.join(base_dir, 'test_epochs_onefile.set') montage = op.join(base_dir, 'test_chans.locs') warnings.simplefilter('always') # enable b/c these tests throw warnings @testing.requires_testing_data def test_io_set(): """Test importing EEGLAB .set files.""" with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') # main tests, and test missing event_id
import os
import os.path as op

from groupmne.utils import compute_coreg_dist
from mne.datasets import testing

# Locations of the MNE testing dataset used throughout this module.
data_path = testing.data_path(download=True)
subjects_dir = op.join(data_path, 'subjects')
subject = "sample"
sample_meg_dir = op.join(data_path, 'MEG', 'sample')
trans_fname = op.join(sample_meg_dir, 'sample_audvis_trunc-trans.fif')
raw_fname = op.join(sample_meg_dir, 'sample_audvis_trunc_raw.fif')

# Point FreeSurfer-style lookups at the testing subjects directory.
os.environ['SUBJECTS_DIR'] = subjects_dir


@testing.requires_testing_data
def test_trans():
    """Coregistration distance for the sample subject must be small."""
    coreg_dist = compute_coreg_dist(subject, trans_fname, raw_fname,
                                    subjects_dir)
    assert coreg_dist < 5e-2
############################################################################### # BTi # --- bti_path = op.abspath(op.dirname(mne.__file__)) + '/io/bti/tests/data/' raw = read_raw_bti(op.join(bti_path, 'test_pdf_linux'), op.join(bti_path, 'test_config_linux'), op.join(bti_path, 'test_hs_linux')) fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs) set_3d_title(figure=fig, title='Magnes 3600wh') ############################################################################### # KIT # --- kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/' raw = read_raw_kit(op.join(kit_path, 'test.sqd')) fig = plot_alignment(raw.info, meg=('helmet', 'sensors'), **kwargs) set_3d_title(figure=fig, title='KIT') ############################################################################### # Artemis123 # ---------- raw = read_raw_artemis123(op.join( testing.data_path(), 'ARTEMIS123', 'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin')) fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs) set_3d_title(figure=fig, title='Artemis123')
def test_events_long():
    """Test events.

    Compares epochs assembled in real time by ``RtEpochs`` (fed by a
    ``MockRtClient``) against epochs computed offline by ``Epochs`` on the
    same (repeated/concatenated) raw data: event arrays, epoch counts, data
    and labels must all match exactly.
    """
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    raw = read_raw_fif(raw_fname, preload=True)
    raw_tmin, raw_tmax = 0, 90

    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, vis_l=3)

    # select gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=True, exclude=raw.info['bads'])

    # load data with usual Epochs for later verification
    # (concatenate 6 copies so the recording is long enough)
    raw = concatenate_raws([raw, raw.copy(), raw.copy(), raw.copy(),
                            raw.copy(), raw.copy()])
    assert 110 < raw.times[-1] < 130
    raw_cropped = raw.copy().crop(raw_tmin, raw_tmax)
    events_offline = find_events(raw_cropped)
    epochs_offline = Epochs(raw_cropped, events_offline, event_id=event_id,
                            tmin=tmin, tmax=tmax, picks=picks, decim=1,
                            reject=dict(grad=4000e-13, eog=150e-6),
                            baseline=None)
    epochs_offline.drop_bad()

    # create the mock-client object
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
                         decim=1, reject=dict(grad=4000e-13, eog=150e-6),
                         baseline=None, isi_max=1.)

    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=raw_tmin, tmax=raw_tmax,
                        buffer_size=1000)

    # offline events are relative to first_samp; shift to match RT events
    expected_events = epochs_offline.events.copy()
    expected_events[:, 0] = expected_events[:, 0] - raw_cropped.first_samp
    assert np.all(expected_events[:, 0] <=
                  (raw_tmax - tmax) * raw.info['sfreq'])
    assert_array_equal(rt_epochs.events, expected_events)
    assert len(rt_epochs) == len(epochs_offline)

    data_picks = pick_types(epochs_offline.info, meg='grad', eeg=False,
                            eog=True, stim=False,
                            exclude=raw.info['bads'])

    # accumulate RT epochs one evoked at a time and compare against offline
    for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
        if ev_num == 0:
            X_rt = ev.data[None, data_picks, :]
            y_rt = int(ev.comment)  # comment attribute contains the event_id
        else:
            X_rt = np.concatenate((X_rt, ev.data[None, data_picks, :]),
                                  axis=0)
            y_rt = np.append(y_rt, int(ev.comment))

    X_offline = epochs_offline.get_data()[:, data_picks, :]
    y_offline = epochs_offline.events[:, 2]
    assert_array_equal(X_rt, X_offline)
    assert_array_equal(y_rt, y_offline)
def test_write_anat(_bids_validate):
    """Test writing anatomical data with write_anat.

    Covers: sidecar JSON content with anatomical landmarks, the deface
    option (bool and dict forms), error handling for bad inputs, and the
    three supported landmark coordinate frames (mri_voxel, mri, head).
    """
    # Get the MNE testing sample data
    import nibabel as nib
    output_path = _TempDir()
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')
    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=False)

    # Write some MRI data and supply a `trans`
    trans_fname = raw_fname.replace('_raw.fif', '-trans.fif')
    trans = mne.read_trans(trans_fname)

    # Get the T1 weighted MRI data file
    # Needs to be converted to Nifti because we only have mgh in our test base
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')

    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                          raw=raw, trans=trans, deface=True, verbose=True,
                          overwrite=True)
    _bids_validate(output_path)

    # Validate that files are as expected
    t1w_json_path = op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.json')
    assert op.exists(t1w_json_path)
    assert op.exists(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    with open(t1w_json_path, 'r') as f:
        t1w_json = json.load(f)
    print(t1w_json)

    # We only should have AnatomicalLandmarkCoordinates as key
    np.testing.assert_array_equal(list(t1w_json.keys()),
                                  ['AnatomicalLandmarkCoordinates'])
    # And within AnatomicalLandmarkCoordinates only LPA, NAS, RPA in that order
    anat_dict = t1w_json['AnatomicalLandmarkCoordinates']
    point_list = ['LPA', 'NAS', 'RPA']
    np.testing.assert_array_equal(list(anat_dict.keys()), point_list)
    # test the actual values of the voxels (no floating points)
    for i, point in enumerate([(66, 51, 46), (41, 32, 74), (17, 53, 47)]):
        coords = anat_dict[point_list[i]]
        np.testing.assert_array_equal(np.asarray(coords, dtype=int), point)

    # BONUS: test also that we can find the matching sidecar
    side_fname = _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                                        output_path, 'T1w.json')
    assert op.split(side_fname)[-1] == 'sub-01_ses-01_acq-01_T1w.json'

    # Now try some anat writing that will fail
    # We already have some MRI data there
    with pytest.raises(IOError, match='`overwrite` is set to False'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=False)

    # pass some invalid type as T1 MRI
    with pytest.raises(ValueError, match='must be a path to a T1 weighted'):
        write_anat(output_path, subject_id, 9999999999999, session_id,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=True)

    # Return without writing sidecar
    sh.rmtree(anat_dir)
    write_anat(output_path, subject_id, t1w_mgh, session_id)
    # Assert that we truly cannot find a sidecar
    with pytest.raises(RuntimeError, match='Did not find any'):
        _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                               output_path, 'T1w.json')

    # trans has a wrong type
    wrong_type = 1
    match = 'transform type {} not known, must be'.format(type(wrong_type))
    with pytest.raises(ValueError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_type, verbose=True, deface=False,
                   overwrite=True)

    # trans is a str, but file does not exist
    wrong_fname = 'not_a_trans'
    match = 'trans file "{}" not found'.format(wrong_fname)
    with pytest.raises(IOError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_fname, verbose=True, overwrite=True)

    # However, reading trans if it is a string pointing to trans is fine
    write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
               trans=trans_fname, verbose=True, deface=False,
               overwrite=True)

    # Writing without a session does NOT yield "ses-None" anywhere
    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, None)
    assert 'ses-None' not in anat_dir2
    assert op.exists(op.join(anat_dir2, 'sub-01_T1w.nii.gz'))

    # specify trans but not raw
    with pytest.raises(ValueError, match='must be specified if `trans`'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=None,
                   trans=trans, verbose=True, deface=False, overwrite=True)

    # test deface: defaced volumes should sum to fewer nonzero voxels
    anat_dir = write_anat(output_path, subject_id, t1w_mgh,
                          session_id, raw=raw, trans=trans_fname,
                          verbose=True, deface=True, overwrite=True)
    t1w = nib.load(op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum = t1w.get_data().sum()

    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh,
                           session_id, raw=raw, trans=trans_fname,
                           verbose=True, deface=dict(inset=25.),
                           overwrite=True)
    t1w2 = nib.load(op.join(anat_dir2, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum2 = t1w2.get_data().sum()
    # larger inset removes more voxels than the default deface
    assert vox_sum > vox_sum2

    anat_dir3 = write_anat(output_path, subject_id, t1w_mgh,
                           session_id, raw=raw, trans=trans_fname,
                           verbose=True, deface=dict(theta=25),
                           overwrite=True)
    t1w3 = nib.load(op.join(anat_dir3, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum3 = t1w3.get_data().sum()
    assert vox_sum > vox_sum3

    with pytest.raises(ValueError,
                       match='The raw object, trans and raw or the landmarks'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=None, verbose=True, deface=True, overwrite=True)

    with pytest.raises(ValueError, match='inset must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset='small'),
                   overwrite=True)

    with pytest.raises(ValueError, match='inset should be positive'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset=-2.),
                   overwrite=True)

    with pytest.raises(ValueError, match='theta must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta='big'),
                   overwrite=True)

    with pytest.raises(ValueError,
                       match='theta should be between 0 and 90 degrees'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta=100),
                   overwrite=True)

    # Write some MRI data and supply `landmarks`
    mri_voxel_landmarks = mne.channels.make_dig_montage(
        lpa=[66.08580, 51.33362, 46.52982],
        nasion=[41.87363, 32.24694, 74.55314],
        rpa=[17.23812, 53.08294, 47.01789], coord_frame='mri_voxel')
    mri_landmarks = mne.channels.make_dig_montage(
        lpa=[-0.07629625, -0.00062556, -0.00776012],
        nasion=[0.00267222, 0.09362256, 0.03224791],
        rpa=[0.07635873, -0.00258065, -0.01212903], coord_frame='mri')
    meg_landmarks = mne.channels.make_dig_montage(
        lpa=[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09],
        nasion=[3.72529030e-09, 1.02605611e-01, 4.19095159e-09],
        rpa=[7.52676800e-02, 0.00000000e+00, 5.58793545e-09],
        coord_frame='head')

    # test mri voxel landmarks
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                          deface=True, landmarks=mri_voxel_landmarks,
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w1 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox1 = t1w1.get_data()

    # test mri landmarks
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          acq, deface=True, landmarks=mri_landmarks,
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w2 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox2 = t1w2.get_data()

    # because of significant rounding errors the voxels are fairly different
    # but the deface works in all three cases and was checked
    assert abs(vox1 - vox2).sum() / abs(vox1).sum() < 0.2

    # crash for raw also
    with pytest.raises(ValueError, match='Please use either `landmarks`'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, raw=raw, trans=trans, deface=True,
                              landmarks=mri_landmarks, verbose=True,
                              overwrite=True)

    # crash for trans also
    with pytest.raises(ValueError, match='`trans` was provided'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, trans=trans, deface=True,
                              landmarks=mri_landmarks, verbose=True,
                              overwrite=True)

    # test meg landmarks (loaded from a saved fif file)
    tmp_dir = _TempDir()
    meg_landmarks.save(op.join(tmp_dir, 'meg_landmarks.fif'))
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          acq, deface=True, trans=trans,
                          landmarks=op.join(tmp_dir, 'meg_landmarks.fif'),
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w3 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox3 = t1w3.get_data()
    assert abs(vox1 - vox3).sum() / abs(vox1).sum() < 0.2

    # test raise error on meg_landmarks with no trans
    with pytest.raises(ValueError, match='Head space landmarks provided'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, deface=True, landmarks=meg_landmarks,
                              verbose=True, overwrite=True)

    # test unsupported (any coord_frame other than head and mri) coord_frame
    fail_landmarks = meg_landmarks.copy()
    fail_landmarks.dig[0]['coord_frame'] = 3
    fail_landmarks.dig[1]['coord_frame'] = 3
    fail_landmarks.dig[2]['coord_frame'] = 3

    with pytest.raises(ValueError, match='Coordinate frame not recognized'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, deface=True, landmarks=fail_landmarks,
                              verbose=True, overwrite=True)
def test_datasets():
    """Test dataset config.

    Setting MNE_DATASETS_TESTING_PATH to the parent of the current testing
    data path must leave ``testing.data_path`` resolving to the same place.
    """
    # gh-4192
    expected = testing.data_path(download=False)
    parent_dir = op.dirname(expected)
    os.environ['MNE_DATASETS_TESTING_PATH'] = parent_dir
    assert testing.data_path(download=False) == expected
def test_handle_ieeg_coords_reading(bids_path):
    """Test reading iEEG coordinates from BIDS files.

    Exercises coordinate-frame round-trips ('mri'/'ras'), unit scaling in
    electrodes.tsv, non-BIDS coordinate systems, the 'acpc' special case,
    missing coordsystem.json, channel-count mismatches, and 'n/a' positions.
    """
    bids_root = _TempDir()
    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    bids_fname = bids_path.copy().update(datatype='ieeg',
                                         suffix='ieeg',
                                         extension='.edf',
                                         root=bids_root)
    raw = _read_raw_edf(raw_fname)

    # ensure we are writing 'ecog'/'ieeg' data
    raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})

    # coordinate frames in mne-python should all map correctly
    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))
    coordinate_frames = ['mri', 'ras']
    for coord_frame in coordinate_frames:
        # XXX: mne-bids doesn't support multiple electrodes.tsv files
        sh.rmtree(bids_root)
        montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                                coord_frame=coord_frame)
        raw.set_montage(montage)
        write_raw_bids(raw, bids_fname, overwrite=True, verbose=False)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are correct coordinate frames
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
        coord_frame_int = MNE_STR_TO_FRAME[coord_frame]
        for digpoint in raw_test.info['dig']:
            assert digpoint['coord_frame'] == coord_frame_int

    # start w/ new bids root
    sh.rmtree(bids_root)
    write_raw_bids(raw, bids_fname, overwrite=True, verbose=False)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
    orig_locs = raw.info['dig'][1]
    test_locs = raw_test.info['dig'][1]
    assert orig_locs == test_locs
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # read in the data and assert montage is the same
    # regardless of 'm', 'cm', 'mm', or 'pixel'
    scalings = {'m': 1, 'cm': 100, 'mm': 1000}
    bids_fname.update(root=bids_root)
    coordsystem_fname = _find_matching_sidecar(bids_fname,
                                               suffix='coordsystem',
                                               extension='.json')
    electrodes_fname = _find_matching_sidecar(bids_fname,
                                              suffix='electrodes',
                                              extension='.tsv')
    orig_electrodes_dict = _from_tsv(electrodes_fname,
                                     [str, float, float, float, str])

    # not BIDS specified should not be read
    coord_unit = 'km'
    scaling = 0.001
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
    electrodes_dict = _from_tsv(electrodes_fname,
                                [str, float, float, float, str])
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis] = \
            np.multiply(orig_electrodes_dict[axis], scaling)
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.warns(RuntimeWarning, match='Coordinate unit is not '
                                            'an accepted BIDS unit'):
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

    # correct BIDS units should scale to meters properly
    for coord_unit, scaling in scalings.items():
        # update coordinate SI units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
        electrodes_dict = _from_tsv(electrodes_fname,
                                    [str, float, float, float, str])
        for axis in ['x', 'y', 'z']:
            electrodes_dict[axis] = \
                np.multiply(orig_electrodes_dict[axis], scaling)
        _to_tsv(electrodes_dict, electrodes_fname)

        # read in raw file w/ updated montage
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

        # obtain the sensor positions and make sure they're the same
        assert_dig_allclose(raw.info, raw_test.info)

    # XXX: Improve by changing names to 'unknown' coordframe (needs mne PR)
    # check that coordinate systems other coordinate systems should be named
    # in the file and not the CoordinateSystem, which is reserved for keywords
    coordinate_frames = ['lia', 'ria', 'lip', 'rip', 'las']
    for coord_frame in coordinate_frames:
        # update coordinate units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', coord_frame)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are MRI coordinate frame
        with pytest.warns(RuntimeWarning, match="iEEG Coordinate frame is "
                                                "not accepted BIDS keyword"):
            raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
            assert raw_test.info['dig'] is None

    # ACPC should be read in as RAS for iEEG
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', 'acpc')
    raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)
    coord_frame_int = MNE_STR_TO_FRAME['ras']
    for digpoint in raw_test.info['dig']:
        assert digpoint['coord_frame'] == coord_frame_int

    # if we delete the coordsystem.json file, an error will be raised
    os.remove(coordsystem_fname)
    with pytest.raises(RuntimeError, match='BIDS mandates that '
                                           'the coordsystem.json'):
        raw = read_raw_bids(bids_path=bids_fname, verbose=False)

    # test error message if electrodes don't match
    bids_path.update(root=bids_root)
    write_raw_bids(raw, bids_path, overwrite=True)
    electrodes_dict = _from_tsv(electrodes_fname)
    # pop off 5 channels
    for key in electrodes_dict.keys():
        for i in range(5):
            electrodes_dict[key].pop()
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.raises(RuntimeError, match='Channels do not correspond'):
        raw_test = read_raw_bids(bids_path=bids_fname, verbose=False)

    # make sure montage is set if there are coordinates w/ 'n/a'
    raw.info['bads'] = []
    write_raw_bids(raw, bids_path, overwrite=True, verbose=False)
    electrodes_dict = _from_tsv(electrodes_fname)
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis][0] = 'n/a'
        electrodes_dict[axis][3] = 'n/a'
    _to_tsv(electrodes_dict, electrodes_fname)

    # test if montage is correctly set via mne-bids
    # electrode coordinates should be nan
    # when coordinate is 'n/a'
    nan_chs = [electrodes_dict['name'][i] for i in [0, 3]]
    with pytest.warns(RuntimeWarning, match='There are channels '
                                            'without locations'):
        raw = read_raw_bids(bids_path=bids_fname, verbose=False)
        for idx, ch in enumerate(raw.info['chs']):
            if ch['ch_name'] in nan_chs:
                assert all(np.isnan(ch['loc'][:3]))
            else:
                assert not any(np.isnan(ch['loc'][:3]))
            assert ch['ch_name'] not in raw.info['bads']
from mne.io.pick import _DATA_CH_TYPES_SPLIT from mne.preprocessing import maxwell_filter from mne.rank import _compute_rank_int from mne.utils import (requires_version, run_tests_if_main, catch_logging, assert_snr) base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') cov_fname = op.join(base_dir, 'test-cov.fif') cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz') cov_km_fname = op.join(base_dir, 'test-km-cov.fif') raw_fname = op.join(base_dir, 'test_raw.fif') ave_fname = op.join(base_dir, 'test-ave.fif') erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif') hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') ctf_fname = op.join(testing.data_path(download=False), 'CTF', 'testdata_ctf.ds') @pytest.mark.parametrize('proj', (True, False)) @pytest.mark.parametrize('pca', (True, 'white', False)) def test_compute_whitener(proj, pca): """Test properties of compute_whitener.""" raw = read_raw_fif(raw_fname).crop(0, 3).load_data() raw.pick_types(eeg=True, exclude=()) if proj: raw.apply_proj() else: raw.del_proj() with pytest.warns(RuntimeWarning, match='Too few samples'): cov = compute_raw_covariance(raw)
from mne import (read_source_spaces, pick_types, read_trans, read_cov, make_sphere_model, create_info, setup_volume_source_space) from mne.chpi import (_calculate_chpi_positions, get_chpi_positions, _get_hpi_info) from mne.tests.test_chpi import _compare_positions from mne.datasets import testing from mne.simulation import simulate_sparse_stc, simulate_raw from mne.io import Raw, RawArray from mne.time_frequency import compute_raw_psd from mne.utils import _TempDir, run_tests_if_main, requires_version, slow_test from mne.fixes import isclose warnings.simplefilter('always') data_path = testing.data_path(download=False) raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') cov_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') trans_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif') bem_path = op.join(data_path, 'subjects', 'sample', 'bem') src_fname = op.join(bem_path, 'sample-oct-2-src.fif') bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif') raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif') pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_subsampled.pos') def _make_stc(raw, src): """Helper to make a STC""" seed = 42
# -*- coding: utf-8 -*- # Authors: Robert Luke <*****@*****.**> # simplified BSD-3 license import os.path as op import datetime from numpy.testing import assert_allclose import pytest from mne.datasets.testing import data_path, requires_testing_data from mne.utils import requires_h5py, object_diff from mne.io import read_raw_snirf, read_raw_nirx from mne_nirs.io.snirf import write_raw_snirf fname_nirx_15_0 = op.join(data_path(download=False), 'NIRx', 'nirscout', 'nirx_15_0_recording') fname_nirx_15_2 = op.join(data_path(download=False), 'NIRx', 'nirscout', 'nirx_15_2_recording') fname_nirx_15_2_short = op.join(data_path(download=False), 'NIRx', 'nirscout', 'nirx_15_2_recording_w_short') @requires_h5py @requires_testing_data @pytest.mark.parametrize( 'fname', (fname_nirx_15_2_short, fname_nirx_15_2, fname_nirx_15_0)) def test_snirf_write(fname, tmpdir): """Test reading NIRX files.""" raw_orig = read_raw_nirx(fname, preload=True) write_raw_snirf(raw_orig, tmpdir.join('test_raw.snirf')) raw = read_raw_snirf(tmpdir.join('test_raw.snirf'))
import os.path as op from numpy.testing import assert_allclose, assert_almost_equal import shutil import pytest from mne.datasets.testing import data_path, requires_testing_data from mne.utils import requires_h5py from mne.io import read_raw_snirf, read_raw_nirx from mne.io.tests.test_raw import _test_raw_reader from mne.preprocessing.nirs import (optical_density, beer_lambert_law, short_channels, source_detector_distances) from mne.transforms import apply_trans, _get_trans # SfNIRS files sfnirs_homer_103_wShort = op.join( data_path(download=False), 'SNIRF', 'SfNIRS', 'snirf_homer3', '1.0.3', 'snirf_1_3_nirx_15_2_' 'recording_w_short.snirf') sfnirs_homer_103_wShort_original = op.join(data_path(download=False), 'NIRx', 'nirscout', 'nirx_15_2_recording_w_short') sfnirs_homer_103_153 = op.join(data_path(download=False), 'SNIRF', 'SfNIRS', 'snirf_homer3', '1.0.3', 'nirx_15_3_recording.snirf') # NIRSport2 files nirx_nirsport2_103 = op.join(data_path(download=False), 'SNIRF', 'NIRx', 'NIRSport2', '1.0.3', '2021-04-23_005.snirf') nirx_nirsport2_103_2 = op.join(data_path(download=False), 'SNIRF', 'NIRx', 'NIRSport2', '1.0.3', '2021-05-05_001.snirf')
import pytest from numpy.testing import assert_allclose from scipy.io import loadmat from scipy import linalg from mne.channels import make_dig_montage from mne import (create_info, EvokedArray, pick_types, Epochs, find_events, read_epochs) from mne.io import read_raw_fif, RawArray from mne.io.constants import FIFF from mne.utils import object_diff from mne.datasets import testing from mne.preprocessing import compute_current_source_density data_path = op.join(testing.data_path(download=False), 'preprocessing') eeg_fname = op.join(data_path, 'test_eeg.mat') coords_fname = op.join(data_path, 'test_eeg_pos.mat') csd_fname = op.join(data_path, 'test_eeg_csd.mat') io_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(io_path, 'test_raw.fif') @pytest.fixture(scope='function', params=[testing._pytest_param()]) def evoked_csd_sphere(): """Get the MATLAB EEG data.""" data = loadmat(eeg_fname)['data'] coords = loadmat(coords_fname)['coords'] * 1e-3 csd = loadmat(csd_fname)['csd'] sphere = np.array((0, 0, 0, 0.08500060886258405)) # meters
def test_handle_eeg_coords_reading():
    """Test reading EEG coordinates from BIDS files.

    Checks that an 'unknown' montage frame skips the electrodes/coordsystem
    sidecars, that a head-frame montage without fiducials warns, that a full
    head-frame montage round-trips, and that a non-BIDS coordinate system
    ('besa') is dropped with a warning on read.
    """
    bids_root = _TempDir()
    bids_path = BIDSPath(subject=subject_id, session=session_id,
                         run=run, acquisition=acq, task=task,
                         root=bids_root)

    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    raw = _read_raw_edf(raw_fname)

    # ensure we are writing 'eeg' data
    raw.set_channel_types({ch: 'eeg' for ch in raw.ch_names})

    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))

    # create montage in 'unknown' coordinate frame
    # and assert coordsystem/electrodes sidecar tsv don't exist
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="unknown")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning, match="Skipping EEG electrodes.tsv"):
        write_raw_bids(raw, bids_path, overwrite=True)

        bids_path.update(root=bids_root)
        coordsystem_fname = _find_matching_sidecar(bids_path,
                                                   suffix='coordsystem',
                                                   extension='.json',
                                                   on_error='warn')
        electrodes_fname = _find_matching_sidecar(bids_path,
                                                  suffix='electrodes',
                                                  extension='.tsv',
                                                  on_error='warn')
        assert coordsystem_fname is None
        assert electrodes_fname is None

    # create montage in head frame and set should result in
    # warning if landmarks not set
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="head")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning, match='Setting montage not possible '
                                            'if anatomical landmarks'):
        write_raw_bids(raw, bids_path, overwrite=True)

    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="head",
                                            nasion=[1, 0, 0],
                                            lpa=[0, 1, 0],
                                            rpa=[0, 0, 1])
    raw.set_montage(montage)
    write_raw_bids(raw, bids_path, overwrite=True)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_path, verbose=True)
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # modify coordinate frame to not-captrak
    coordsystem_fname = _find_matching_sidecar(bids_path,
                                               suffix='coordsystem',
                                               extension='.json')
    _update_sidecar(coordsystem_fname, 'EEGCoordinateSystem', 'besa')
    with pytest.warns(RuntimeWarning, match='EEG Coordinate frame is not '
                                            'accepted BIDS keyword'):
        raw_test = read_raw_bids(bids_path)
        assert raw_test.info['dig'] is None
and the trials are plotted, sorting by response time. """ # Authors: Jona Sassenhagen <*****@*****.**> # # License: BSD (3-clause) import mne from mne.datasets import testing from mne import Epochs, io, pick_types from mne.event import define_target_events print(__doc__) ############################################################################### # Load EEGLAB example data (a small EEG dataset) data_path = testing.data_path() fname = data_path + "/EEGLAB/test_raw.set" montage = data_path + "/EEGLAB/test_chans.locs" event_id = {"rt": 1, "square": 2} # must be specified for str events eog = {"FPz", "EOG1", "EOG2"} raw = io.eeglab.read_raw_eeglab(fname, eog=eog, montage=montage, event_id=event_id) picks = pick_types(raw.info, eeg=True) events = mne.find_events(raw) ############################################################################### # Create Epochs # define target events: # 1. find response times: distance between "square" and "rt" events
from nose.tools import assert_true, assert_equal, assert_raises import pytest from mne import Epochs, read_events, read_evokeds from mne.io import read_raw_fif from mne.datasets import testing from mne.report import Report from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, requires_PIL, run_tests_if_main) from mne.viz import plot_alignment import matplotlib matplotlib.use('Agg') # for testing don't use X server data_dir = testing.data_path(download=False) subjects_dir = op.join(data_dir, 'subjects') report_dir = op.join(data_dir, 'MEG', 'sample') raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif') event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif') cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif') fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif') inv_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')) evoked_fname = op.join(base_dir, 'test-ave.fif')
from mne.io.constants import FIFF
from mne import pick_types
from mne.utils import assert_dig_allclose, run_tests_if_main
from mne.transforms import Transform, combine_transforms, invert_transform

# Paths to the small BTi/4D fixtures bundled with the test suite
base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')

archs = 'linux', 'solaris'
pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
                   for a in archs]
tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')

# Larger fixtures that live in the (optionally downloaded) testing dataset
fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',
                     'c,rfDC')
fname_sim = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
                    'c,rfDC')
fname_sim_filt = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
                         'c,rfDC,fn50,o')

# the 4D exporter doesn't export all channels, so we confine our comparison
NCH = 248


@testing.requires_testing_data
def test_read_2500():
    """Test reading data from 2500 system."""
    _test_raw_reader(read_raw_bti, pdf_fname=fname_2500,
                     head_shape_fname=None)
_ica_explained_variance) from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab from mne.io.meas_info import _kind_dict from mne.io.pick import _DATA_CH_TYPES_SPLIT from mne.rank import _compute_rank_int from mne.utils import (catch_logging, _TempDir, requires_sklearn, run_tests_if_main) from mne.datasets import testing from mne.event import make_fixed_length_events data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(data_dir, 'test_raw.fif') event_name = op.join(data_dir, 'test-eve.fif') test_cov_name = op.join(data_dir, 'test-cov.fif') test_base_dir = testing.data_path(download=False) ctf_fname = op.join(test_base_dir, 'CTF', 'testdata_ctf.ds') fif_fname = op.join(test_base_dir, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') eeglab_fname = op.join(test_base_dir, 'EEGLAB', 'test_raw.set') eeglab_montage = op.join(test_base_dir, 'EEGLAB', 'test_chans.locs') ctf_fname2 = op.join(test_base_dir, 'CTF', 'catch-alp-good-f.ds') event_id, tmin, tmax = 1, -0.2, 0.2 # if stop is too small pca may fail in some cases, but we're okay on this file start, stop = 0, 6 score_funcs_unsuited = ['pointbiserialr', 'ansari']
import numpy as np from numpy.testing import assert_array_equal, assert_allclose, assert_equal import pytest from scipy import io as sio from mne import find_events, pick_types from mne.io import read_raw_egi, read_evokeds_mff from mne.io.tests.test_raw import _test_raw_reader from mne.io.egi.egi import _combine_triggers from mne.utils import run_tests_if_main, requires_version, object_diff from mne.datasets.testing import data_path, requires_testing_data base_dir = op.join(op.dirname(op.abspath(__file__)), 'data') egi_fname = op.join(base_dir, 'test_egi.raw') egi_txt_fname = op.join(base_dir, 'test_egi.txt') egi_path = op.join(data_path(download=False), 'EGI') egi_mff_fname = op.join(egi_path, 'test_egi.mff') egi_mff_pns_fname = op.join(egi_path, 'test_egi_pns.mff') egi_pause_fname = op.join(egi_path, 'test_egi_multiepoch_paused.mff') egi_eprime_pause_fname = op.join(egi_path, 'test_egi_multiepoch_eprime.mff') egi_pause_w1337_fname = op.join(egi_path, 'w1337_20191014_105416.mff') egi_mff_evoked_fname = op.join(egi_path, 'test_egi_evoked.mff') egi_txt_evoked_cat1_fname = op.join(egi_path, 'test_egi_evoked_cat1.txt') egi_txt_evoked_cat2_fname = op.join(egi_path, 'test_egi_evoked_cat2.txt') # absolute event times from NetStation egi_pause_events = { 'AM40': [7.224, 11.928, 14.413, 16.848], 'bgin': [6.121, 8.434, 13.369, 15.815, 18.094], 'FIX+': [6.225, 10.929, 13.414, 15.849], 'ITI+': [8.293, 12.997, 15.482, 17.918]
mne_make_scalp_surfaces, mne_maxfilter, mne_report,
mne_surf2bem, mne_watershed_bem, mne_compare_fiff,
mne_flash_bem, mne_show_fiff, mne_show_info)
from mne.datasets import testing, sample
from mne.io import read_raw_fif
from mne.utils import (run_tests_if_main, _TempDir, requires_mne,
                       requires_mayavi, requires_tvtk, requires_freesurfer,
                       traits_test, ArgvSetter)

matplotlib.use('Agg')  # for testing don't use X server

base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')

subjects_dir = op.join(testing.data_path(download=False), 'subjects')


def check_usage(module, force_help=False):
    """Ensure we print usage.

    Runs the command module (with ``--help`` when ``force_help``), swallows
    the SystemExit that argparse raises, and checks the captured stdout
    contains a usage message.
    """
    args = ('--help',) if force_help else ()
    with ArgvSetter(args) as out:
        try:
            module.run()
        except SystemExit:
            pass
        assert 'Usage: ' in out.stdout.getvalue()


@pytest.mark.slowtest
def test_browse_raw():
from mne import datasets, read_labels_from_annot, write_labels_to_annot from mne.datasets import (testing, fetch_infant_template, fetch_phantom, fetch_dataset) from mne.datasets._fsaverage.base import _set_montage_coreg_path from mne.datasets._infant import base as infant_base from mne.datasets._phantom import base as phantom_base from mne.datasets.utils import _manifest_check_download from mne.utils import (requires_good_network, get_subjects_dir, ArgvSetter, _pl, use_log_level, catch_logging, hashfunc) from mne.utils.check import _soft_import # import pooch library for handling the dataset downloading pooch = _soft_import('pooch', 'dataset downloading', strict=True) subjects_dir = op.join(testing.data_path(download=False), 'subjects') def test_datasets_basic(tmpdir, monkeypatch): """Test simple dataset functions.""" # XXX 'hf_sef' and 'misc' do not conform to these standards for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', 'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal', 'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword', 'mtrf', 'phantom_4dbti', 'visual_92_categories', 'fieldtrip_cmc'): if dname.startswith('bst'): dataset = getattr(datasets.brainstorm, dname) else: dataset = getattr(datasets, dname) if dataset.data_path(download=False) != '': assert isinstance(dataset.get_version(), str)
def test_scale_mri_xfm():
    """Test scale_mri transforms and MRI scaling.

    Scales fsaverage (uniform scale) and sample (per-axis scale) into a
    temporary subjects dir, then checks that the scaled source space,
    T1 MRI, and vertex->MNI transform all agree with the originals.
    """
    # scale fsaverage
    tempdir = _TempDir()
    os.environ['_MNE_FEW_SURFACES'] = 'true'
    fake_home = testing.data_path()
    # add fsaverage
    create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
                           verbose=True)
    # add sample (with few files)
    sample_dir = op.join(tempdir, 'sample')
    os.mkdir(sample_dir)
    os.mkdir(op.join(sample_dir, 'bem'))
    for dirname in ('mri', 'surf'):
        copytree(op.join(fake_home, 'subjects', 'sample', dirname),
                 op.join(sample_dir, dirname))
    subject_to = 'flachkopf'
    spacing = 'oct2'
    for subject_from in ('fsaverage', 'sample'):
        if subject_from == 'fsaverage':
            scale = 1.  # single dim
        else:
            scale = [0.9, 2, .8]  # separate
        src_from_fname = op.join(tempdir, subject_from, 'bem',
                                 '%s-%s-src.fif' % (subject_from, spacing))
        src_from = mne.setup_source_space(
            subject_from, spacing, subjects_dir=tempdir, add_dist=False)
        write_source_spaces(src_from_fname, src_from)
        print(src_from_fname)
        vertices_from = np.concatenate([s['vertno'] for s in src_from])
        assert len(vertices_from) == 36
        # hemisphere index (0=lh, 1=rh) for each vertex
        hemis = ([0] * len(src_from[0]['vertno']) +
                 [1] * len(src_from[0]['vertno']))
        mni_from = mne.vertex_to_mni(vertices_from, hemis, subject_from,
                                     subjects_dir=tempdir)
        if subject_from == 'fsaverage':  # identity transform
            source_rr = np.concatenate(
                [s['rr'][s['vertno']] for s in src_from]) * 1e3
            assert_allclose(mni_from, source_rr)
        if subject_from == 'fsaverage':
            overwrite = skip_fiducials = False
        else:
            # sample has no fiducials file in this stripped-down copy
            with pytest.raises(IOError, match='No fiducials file'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir)
            skip_fiducials = True
            # the target subject dir was created by the fsaverage pass
            with pytest.raises(IOError, match='already exists'):
                scale_mri(subject_from, subject_to, scale,
                          subjects_dir=tempdir,
                          skip_fiducials=skip_fiducials)
            overwrite = True
        scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
                  verbose='debug', overwrite=overwrite,
                  skip_fiducials=skip_fiducials)
        if subject_from == 'fsaverage':
            assert _is_mri_subject(subject_to, tempdir), "Scaling failed"
        src_to_fname = op.join(tempdir, subject_to, 'bem',
                               '%s-%s-src.fif' % (subject_to, spacing))
        assert op.exists(src_to_fname), "Source space was not scaled"
        # Check MRI scaling
        fname_mri = op.join(tempdir, subject_to, 'mri', 'T1.mgz')
        assert op.exists(fname_mri), "MRI was not scaled"
        # Check MNI transform
        src = mne.read_source_spaces(src_to_fname)
        vertices = np.concatenate([s['vertno'] for s in src])
        assert_array_equal(vertices, vertices_from)
        mni = mne.vertex_to_mni(vertices, hemis, subject_to,
                                subjects_dir=tempdir)
        assert_allclose(mni, mni_from, atol=1e-3)  # 0.001 mm
    del os.environ['_MNE_FEW_SURFACES']
# Authors: Robert Luke <*****@*****.**>
#
# License: BSD (3-clause)

import os.path as op

import pytest
import numpy as np

from mne.datasets.testing import data_path
from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, tddr
from mne.datasets import testing

fname_nirx_15_2 = op.join(data_path(download=False),
                          'NIRx', 'nirx_15_2_recording')


@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2]))
def test_temporal_derivative_distribution_repair(fname, tmpdir):
    """Test running artifact rejection."""
    raw = read_raw_nirx(fname)
    raw = optical_density(raw)

    # Add a baseline shift artifact about half way through data
    max_shift = np.max(np.diff(raw._data[0]))
    shift_amp = 5 * max_shift
    raw._data[0, 0:30] = raw._data[0, 0:30] - (shift_amp)
    assert np.max(np.diff(raw._data[0])) > shift_amp
    # Ensure that applying the algorithm reduces the step change
from mne.source_estimate import read_source_estimate, VolSourceEstimate
from mne import (read_cov, read_forward_solution, read_evokeds, pick_types,
                 pick_types_forward, make_forward_solution, EvokedArray,
                 convert_forward_solution, Covariance, combine_evoked)
from mne.io import read_raw_fif, Info
from mne.minimum_norm.inverse import (apply_inverse, read_inverse_operator,
                                      apply_inverse_raw,
                                      apply_inverse_epochs,
                                      make_inverse_operator,
                                      write_inverse_operator,
                                      compute_rank_inverse,
                                      prepare_inverse_operator)
from mne.tests.common import assert_naming
from mne.utils import _TempDir, run_tests_if_main
from mne.externals import six

# Paths to the sample testing data (not downloaded here)
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
# Four inverses:
fname_full = op.join(s_path,
                     'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_inv = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
fname_inv_fixed_nodepth = op.join(s_path,
                                  'sample_audvis_trunc-meg-eeg-oct-4-meg'
                                  '-nodepth-fixed-inv.fif')
fname_inv_fixed_depth = op.join(s_path,
                                'sample_audvis_trunc-meg-eeg-oct-4-meg'
                                '-fixed-inv.fif')
fname_inv_meeg_diag = op.join(s_path,
                              'sample_audvis_trunc-'
                              'meg-eeg-oct-4-meg-eeg-diagnoise-inv.fif')
_needs_eeg_average_ref_proj) from mne.proj import (read_proj, write_proj, make_eeg_average_ref_proj, _has_eeg_average_ref_proj) from mne.tests.common import assert_naming from mne.utils import _TempDir, run_tests_if_main, slow_test warnings.simplefilter('always') # enable b/c these tests throw warnings base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') raw_fname = op.join(base_dir, 'test_raw.fif') event_fname = op.join(base_dir, 'test-eve.fif') proj_fname = op.join(base_dir, 'test-proj.fif') proj_gz_fname = op.join(base_dir, 'test-proj.fif.gz') bads_fname = op.join(base_dir, 'test_bads.txt') sample_path = op.join(testing.data_path(download=False), 'MEG', 'sample') fwd_fname = op.join(sample_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') sensmap_fname = op.join(sample_path, 'sample_audvis_trunc-%s-oct-4-fwd-sensmap-%s.w') eog_fname = op.join(sample_path, 'sample_audvis_eog-proj.fif') ecg_fname = op.join(sample_path, 'sample_audvis_ecg-proj.fif') def test_bad_proj(): """Test dealing with bad projection application.""" raw = read_raw_fif(raw_fname, preload=True) events = read_events(event_fname) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') picks = picks[2:9:3]