def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg):
    """Test maxfilter rank lookup."""
    # Parametrized (presumably via pytest.mark.parametrize outside this view):
    # n_proj projectors to add, fixture file, expected SSS rank, and the
    # `meg` mode passed to compute_proj_raw ('separate' or 'combined').
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types()
    assert raw.info['projs'] == []
    # The Maxwell-filter record stored by MaxFilter; nfree is its rank.
    mf = raw.info['proc_history'][0]['max_info']
    assert mf['sss_info']['nfree'] == rank_orig
    assert _get_rank_sss(raw) == rank_orig
    # With meg='separate', mag and grad each get n_proj projectors, so each
    # projector pair removes 2 degrees of freedom; 'combined' removes 1.
    mult = 1 + (meg == 'separate')
    rank = rank_orig - mult * n_proj
    if n_proj > 0:
        # Let's do some projection
        raw.add_proj(
            compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj, meg=meg,
                             verbose=True))
    raw.apply_proj()
    # Snapshot the data so we can verify rank estimation has no side effects.
    data_orig = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a number'):
        _estimate_rank_raw(raw, tol=None)
    allowed_rank = [rank_orig if meg == 'separate' else rank]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        allowed_rank.append(allowed_rank[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    rank_new = _estimate_rank_raw(raw)
    assert rank_new in allowed_rank
    tol = 'float32'  # temporary option until we can fix things
    rank_new = _estimate_rank_raw(raw, tol=tol)
    assert rank_new in allowed_rank
    rank_new = _estimate_rank_raw(raw, scalings=dict(), tol=tol)
    assert rank_new in allowed_rank
    # Explicit physical scalings for grad/mag channels.
    scalings = dict(grad=1e13, mag=1e15)
    rank_new = _compute_rank_int(raw, None, scalings=scalings, tol=tol,
                                 verbose='debug')
    assert rank_new in allowed_rank
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        allowed_rank.append(allowed_rank[0] - 2)
    rank_new = _compute_rank_int(raw, None, tol=tol, verbose='debug')
    assert rank_new in allowed_rank
    del allowed_rank
    # rank='info' should give the exact rank from measurement info.
    rank_new = _compute_rank_int(raw, 'info')
    assert rank_new == rank
    # Rank estimation must not have modified the data in place.
    assert_array_equal(raw[:][0], data_orig)
def test_ica_rank_reduction(method):
    """Test recovery ICA rank reduction."""
    _skip_check_picard(method)
    # Most basic recovery
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        # max_iter=1 is intentional (speed); the convergence warning is
        # expected and asserted here.
        with pytest.warns(UserWarning, match='did not converge'):
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method=method, max_iter=1).fit(raw, picks=picks)

        rank_before = _compute_rank_int(raw.copy().pick(picks), proj=False)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw.copy())
        rank_after = _compute_rank_int(raw_clean.copy().pick(picks),
                                       proj=False)
        # interaction between ICA rejection and PCA components difficult
        # to predict. rank_after often seems to be 1 higher than
        # n_pca_components, so only a bracketing inequality is asserted.
        assert (n_components < n_pca_components <= rank_after <= rank_before)
def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg):
    """Test maxfilter rank lookup."""
    # Parametrized (presumably via pytest.mark.parametrize outside this view):
    # n_proj projectors to add, fixture file, expected SSS rank, and the
    # `meg` mode passed to compute_proj_raw ('separate' or 'combined').
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types()
    assert raw.info['projs'] == []
    # The Maxwell-filter record stored by MaxFilter; nfree is its rank.
    mf = raw.info['proc_history'][0]['max_info']
    assert mf['sss_info']['nfree'] == rank_orig
    assert _get_rank_sss(raw) == rank_orig
    # With meg='separate', mag and grad each get n_proj projectors, so each
    # projector pair removes 2 degrees of freedom; 'combined' removes 1.
    mult = 1 + (meg == 'separate')
    rank = rank_orig - mult * n_proj
    if n_proj > 0:
        # Let's do some projection
        raw.add_proj(compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj,
                                      meg=meg, verbose=True))
    raw.apply_proj()
    # Snapshot the data so we can verify rank estimation has no side effects.
    data_orig = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a number'):
        _estimate_rank_raw(raw, tol=None)
    allowed_rank = [rank_orig if meg == 'separate' else rank]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        allowed_rank.append(allowed_rank[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    rank_new = _estimate_rank_raw(raw)
    assert rank_new in allowed_rank
    tol = 'float32'  # temporary option until we can fix things
    rank_new = _estimate_rank_raw(raw, tol=tol)
    assert rank_new in allowed_rank
    rank_new = _estimate_rank_raw(raw, scalings=dict(), tol=tol)
    assert rank_new in allowed_rank
    # Explicit physical scalings for grad/mag channels.
    scalings = dict(grad=1e13, mag=1e15)
    rank_new = _compute_rank_int(raw, None, scalings=scalings, tol=tol,
                                 verbose='debug')
    assert rank_new in allowed_rank
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        allowed_rank.append(allowed_rank[0] - 2)
    rank_new = _compute_rank_int(raw, None, tol=tol, verbose='debug')
    assert rank_new in allowed_rank
    del allowed_rank
    # rank='info' should give the exact rank from measurement info.
    rank_new = _compute_rank_int(raw, 'info')
    assert rank_new == rank
    # Rank estimation must not have modified the data in place.
    assert_array_equal(raw[:][0], data_orig)
def test_spatiotemporal_only():
    """Test tSSS-only processing."""
    # Load raw testing data
    tmax = 0.5
    raw = read_crop(raw_fname, (0, tmax)).load_data()
    # Take every other MEG channel to keep the test fast.
    picks = pick_types(raw.info, meg=True, exclude='bads')[::2]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    mag_picks = pick_types(raw.info, meg='mag', exclude=())
    # Baseline magnetometer signal power, used by _assert_shielding below.
    power = np.sqrt(np.sum(raw[mag_picks][0] ** 2))
    # basics: st_only=True applies only the temporal projection, so the
    # spatial rank should stay at the full channel count.
    raw_tsss = maxwell_filter(raw, st_duration=tmax / 2., st_only=True)
    assert len(raw.info['projs']) == len(raw_tsss.info['projs'])
    assert _compute_rank_int(raw_tsss, proj=False) == len(picks)
    _assert_shielding(raw_tsss, power, 9)
    # with movement
    head_pos = read_head_pos(pos_fname)
    raw_tsss = maxwell_filter(raw, st_duration=tmax / 2., st_only=True,
                              head_pos=head_pos)
    assert _compute_rank_int(raw_tsss, proj=False) == len(picks)
    _assert_shielding(raw_tsss, power, 9)
    with pytest.warns(RuntimeWarning, match='st_fixed'):
        raw_tsss = maxwell_filter(raw, st_duration=tmax / 2., st_only=True,
                                  head_pos=head_pos, st_fixed=False)
    assert _compute_rank_int(raw_tsss, proj=False) == len(picks)
    _assert_shielding(raw_tsss, power, 9)
    # should do nothing: st_correlation=1. rejects no temporal subspace
    raw_tsss = maxwell_filter(raw, st_duration=tmax, st_correlation=1.,
                              st_only=True)
    assert_allclose(raw[:][0], raw_tsss[:][0])
    # degenerate
    pytest.raises(ValueError, maxwell_filter, raw, st_only=True)  # no ST
    # two-step process equivalent to single-step process
    raw_tsss = maxwell_filter(raw, st_duration=tmax, st_only=True)
    raw_tsss = maxwell_filter(raw_tsss)
    raw_tsss_2 = maxwell_filter(raw, st_duration=tmax)
    assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
    # now also with head movement, and a bad MEG channel
    assert len(raw.info['bads']) == 0
    bads = [raw.ch_names[0]]
    raw.info['bads'] = list(bads)
    raw_tsss = maxwell_filter(raw, st_duration=tmax, st_only=True,
                              head_pos=head_pos)
    assert raw.info['bads'] == bads
    assert raw_tsss.info['bads'] == bads  # don't reset
    raw_tsss = maxwell_filter(raw_tsss, head_pos=head_pos)
    assert raw_tsss.info['bads'] == []  # do reset MEG bads
    raw_tsss_2 = maxwell_filter(raw, st_duration=tmax, head_pos=head_pos)
    assert raw_tsss_2.info['bads'] == []
    assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
def test_sss_proj():
    """Test `meg` proj option."""
    raw = read_raw_fif(raw_fname)
    raw.crop(0, 1.0).load_data().pick_types(meg=True, exclude=())
    # Keep only the first 51 channels and drop existing projectors.
    raw.pick_channels(raw.ch_names[:51]).del_proj()
    raw_sss = maxwell_filter(raw, int_order=5, ext_order=2)
    sss_rank = 21  # really low due to channel picking
    assert len(raw_sss.info['projs']) == 0
    # 'separate' yields 3 grad + 3 mag projectors that span the same SSS
    # subspace (no rank loss); 'combined' yields 3 joint projectors that
    # each remove one degree of freedom.
    for meg, n_proj, want_rank in (('separate', 6, sss_rank),
                                   ('combined', 3, sss_rank - 3)):
        proj = compute_proj_raw(raw_sss, n_grad=3, n_mag=3, meg=meg,
                                verbose='error')
        this_raw = raw_sss.copy().add_proj(proj).apply_proj()
        assert len(this_raw.info['projs']) == n_proj
        sss_proj_rank = _compute_rank_int(this_raw)
        cov = compute_raw_covariance(this_raw, verbose='error')
        W, ch_names, rank = compute_whitener(cov, this_raw.info,
                                             return_rank=True)
        assert ch_names == this_raw.ch_names
        assert want_rank == sss_proj_rank == rank  # proper reduction
        if meg == 'combined':
            # Combined projectors span all channels.
            assert this_raw.info['projs'][0]['data']['col_names'] == ch_names
        else:
            # Separate mag projectors cover only the mag channels
            # (every third channel in standard VectorView ordering).
            mag_names = ch_names[2::3]
            assert this_raw.info['projs'][3]['data']['col_names'] == mag_names
def test_sss_proj():
    """Test `meg` proj option."""
    raw = read_raw_fif(raw_fname)
    raw.crop(0, 1.0).load_data().pick_types(exclude=())
    # Keep only the first 51 channels and drop existing projectors.
    raw.pick_channels(raw.ch_names[:51]).del_proj()
    # meg='combined' requires SSS-processed data; this raw is not.
    with pytest.raises(ValueError, match='can only be used with Maxfiltered'):
        compute_proj_raw(raw, meg='combined')
    raw_sss = maxwell_filter(raw, int_order=5, ext_order=2)
    sss_rank = 21  # really low due to channel picking
    assert len(raw_sss.info['projs']) == 0
    # 'separate' yields 3 grad + 3 mag projectors that span the same SSS
    # subspace (no rank loss); 'combined' yields 3 joint projectors that
    # each remove one degree of freedom.
    for meg, n_proj, want_rank in (('separate', 6, sss_rank),
                                   ('combined', 3, sss_rank - 3)):
        proj = compute_proj_raw(raw_sss, n_grad=3, n_mag=3, meg=meg,
                                verbose='error')
        this_raw = raw_sss.copy().add_proj(proj).apply_proj()
        assert len(this_raw.info['projs']) == n_proj
        sss_proj_rank = _compute_rank_int(this_raw)
        cov = compute_raw_covariance(this_raw, verbose='error')
        W, ch_names, rank = compute_whitener(cov, this_raw.info,
                                             return_rank=True)
        assert ch_names == this_raw.ch_names
        assert want_rank == sss_proj_rank == rank  # proper reduction
        if meg == 'combined':
            # Combined projectors span all channels.
            assert this_raw.info['projs'][0]['data']['col_names'] == ch_names
        else:
            # Separate mag projectors cover only the mag channels
            # (every third channel in standard VectorView ordering).
            mag_names = ch_names[2::3]
            assert this_raw.info['projs'][3]['data']['col_names'] == mag_names
def test_maxwell_filter_additional(tmpdir): """Test processing of Maxwell filtered data.""" # TODO: Future tests integrate with mne/io/tests/test_proc_history # Load testing data (raw, SSS std origin, SSS non-standard origin) data_path = op.join(testing.data_path(download=False)) file_name = 'test_move_anon' raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif') # Use 2.0 seconds of data to get stable cov. estimate raw = read_crop(raw_fname, (0., 2.)) # Get MEG channels, compute Maxwell filtered data raw.load_data() raw.pick_types(meg=True, eeg=False) int_order = 8 raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None, bad_condition='ignore') # Test io on processed data tempdir = str(tmpdir) test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = read_crop(test_outname).load_data() # Some numerical imprecision since save uses 'single' fmt assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0], rtol=1e-6, atol=1e-20) # Test rank of covariance matrices for raw and SSS processed data cov_raw = compute_raw_covariance(raw) cov_sss = compute_raw_covariance(raw_sss) scalings = None cov_raw_rank = _compute_rank_int( cov_raw, scalings=scalings, info=raw.info, proj=False) cov_sss_rank = _compute_rank_int( cov_sss, scalings=scalings, info=raw_sss.info, proj=False) assert cov_raw_rank == raw.info['nchan'] assert cov_sss_rank == _get_n_moments(int_order)
def test_spatiotemporal(): """Test Maxwell filter (tSSS) spatiotemporal processing.""" # Load raw testing data raw = read_crop(raw_fname) # Test that window is less than length of data with pytest.raises(ValueError, match='duration'): maxwell_filter(raw, st_duration=1000.) # We could check both 4 and 10 seconds because Elekta handles them # differently (to ensure that std/non-std tSSS windows are correctly # handled), but the 4-sec case should hopefully be sufficient. st_durations = [4.] # , 10.] tols = [(80, 100)] # , 200.] kwargs = dict(origin=mf_head_origin, regularize=None, bad_condition='ignore') for st_duration, tol in zip(st_durations, tols): # Load tSSS data depending on st_duration and get data tSSS_fname = op.join(sss_path, 'test_move_anon_st%0ds_raw_sss.fif' % st_duration) tsss_bench = read_crop(tSSS_fname) # Because Elekta's tSSS sometimes(!) lumps the tail window of data # onto the previous buffer if it's shorter than st_duration, we have to # crop the data here to compensate for Elekta's tSSS behavior. # if st_duration == 10.: # tsss_bench.crop(0, st_duration) # raw.crop(0, st_duration) # Test sss computation at the standard head origin. Same cropping issue # as mentioned above. raw_tsss = maxwell_filter( raw, st_duration=st_duration, **kwargs) assert _compute_rank_int(raw_tsss, proj=False) == 140 assert_meg_snr(raw_tsss, tsss_bench, *tol) py_st = raw_tsss.info['proc_history'][0]['max_info']['max_st'] assert (len(py_st) > 0) assert py_st['buflen'] == st_duration assert py_st['subspcorr'] == 0.98 # Degenerate cases pytest.raises(ValueError, maxwell_filter, raw, st_duration=10., st_correlation=0.)
def test_low_rank_cov(raw_epochs_events):
    """Test additional properties of low rank computations."""
    raw, epochs, events = raw_epochs_events
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    proj_rank = 365  # one EEG proj
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        emp_cov = compute_covariance(epochs)
    # Test equivalence with mne.cov.regularize subspace
    with pytest.raises(ValueError, match='are dependent.*must equal'):
        regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
    assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
    # rank='full' regularization restores full (projected) rank ...
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == proj_rank
    # ... which exceeds what the info says is theoretically possible.
    with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
        _compute_rank_int(reg_cov, info=epochs.info)
    del reg_cov
    with catch_logging() as log:
        reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
                               verbose=True)
    log = log.getvalue()
    assert 'jointly' in log
    assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
    # proj=False should give the same result here since rank=None
    # regularization operates in the rank-reduced subspace either way.
    reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
    assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
    del reg_r_only_cov, reg_r_cov

    # test that rank=306 is same as rank='full'
    epochs_meg = epochs.copy().pick_types(meg=True)
    assert len(epochs_meg.ch_names) == 306
    epochs_meg.info.update(bads=[], projs=[])
    cov_full = compute_covariance(epochs_meg, method='oas',
                                  rank='full', verbose='error')
    assert _cov_rank(cov_full, epochs_meg.info) == 306
    with pytest.warns(RuntimeWarning, match='few samples'):
        cov_dict = compute_covariance(epochs_meg, method='oas',
                                      rank=dict(meg=306))
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    cov_dict = compute_covariance(epochs_meg, method='oas',
                                  rank=dict(meg=306), verbose='error')
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])

    # Work with just EEG data to simplify projection / rank reduction
    raw = raw.copy().pick_types(meg=False, eeg=True)
    n_proj = 2
    raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
    n_ch = len(raw.ch_names)
    rank = n_ch - n_proj - 1  # plus avg proj
    assert len(raw.info['projs']) == 3
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    assert len(raw.ch_names) == n_ch
    emp_cov = compute_covariance(epochs, rank='full', verbose='error')
    assert _cov_rank(emp_cov, epochs.info) == rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == rank
    reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_cov, epochs.info) == rank
    dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
                                 verbose='error')
    assert _cov_rank(dia_cov, epochs.info) == rank
    assert_allclose(dia_cov['data'], reg_cov['data'])
    epochs.pick_channels(epochs.ch_names[:103])
    # degenerate
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='pca')
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='factor_analysis')
def _cov_rank(cov, info, proj=True):
    """Compute the integer rank of a covariance, suppressing rank warnings.

    Some tests intentionally violate the computed/info rank assumption
    (e.g. when combining SSS with ``rank='full'``), so any mismatch
    warnings emitted by the rank computation are swallowed here.
    """
    with pytest.warns(None):
        result = _compute_rank_int(cov, info=info, proj=proj)
    return result
def test_low_rank_cov(raw_epochs_events):
    """Test additional properties of low rank computations."""
    raw, epochs, events = raw_epochs_events
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    proj_rank = 365  # one EEG proj
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        emp_cov = compute_covariance(epochs)
    # Test equivalence with mne.cov.regularize subspace
    with pytest.raises(ValueError, match='are dependent.*must equal'):
        regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
    assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
    # rank='full' regularization restores full (projected) rank ...
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == proj_rank
    # ... which exceeds what the info says is theoretically possible.
    with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
        _compute_rank_int(reg_cov, info=epochs.info)
    del reg_cov
    with catch_logging() as log:
        reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
                               verbose=True)
    log = log.getvalue()
    assert 'jointly' in log
    assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
    # proj=False should give the same result here since rank=None
    # regularization operates in the rank-reduced subspace either way.
    reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
    assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
    del reg_r_only_cov, reg_r_cov

    # test that rank=306 is same as rank='full'
    epochs_meg = epochs.copy().pick_types()
    assert len(epochs_meg.ch_names) == 306
    epochs_meg.info.update(bads=[], projs=[])
    cov_full = compute_covariance(epochs_meg, method='oas',
                                  rank='full', verbose='error')
    assert _cov_rank(cov_full, epochs_meg.info) == 306
    # Passing a bare int for rank is deprecated in favor of dict(meg=...).
    with pytest.deprecated_call(match='int is deprecated'):
        cov_dict = compute_covariance(epochs_meg, method='oas', rank=306)
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    cov_dict = compute_covariance(epochs_meg, method='oas',
                                  rank=dict(meg=306), verbose='error')
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])

    # Work with just EEG data to simplify projection / rank reduction
    raw = raw.copy().pick_types(meg=False, eeg=True)
    n_proj = 2
    raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
    n_ch = len(raw.ch_names)
    rank = n_ch - n_proj - 1  # plus avg proj
    assert len(raw.info['projs']) == 3
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    assert len(raw.ch_names) == n_ch
    emp_cov = compute_covariance(epochs, rank='full', verbose='error')
    assert _cov_rank(emp_cov, epochs.info) == rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == rank
    reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_cov, epochs.info) == rank
    dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
                                 verbose='error')
    assert _cov_rank(dia_cov, epochs.info) == rank
    assert_allclose(dia_cov['data'], reg_cov['data'])
    # test our deprecation: can simply remove later
    epochs.pick_channels(epochs.ch_names[:103])
    # degenerate
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='pca')
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='factor_analysis')