def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg):
    """Test maxfilter rank lookup."""
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types()
    assert raw.info['projs'] == []
    max_info = raw.info['proc_history'][0]['max_info']
    assert max_info['sss_info']['nfree'] == rank_orig
    assert _get_rank_sss(raw) == rank_orig

    # each projector costs one dimension, or two when MEG types are separate
    per_proj = 2 if meg == 'separate' else 1
    expected = rank_orig - per_proj * n_proj
    if n_proj > 0:
        # Let's do some projection
        projs = compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj,
                                 meg=meg, verbose=True)
        raw.add_proj(projs)
    raw.apply_proj()
    before = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a number'):
        _estimate_rank_raw(raw, tol=None)

    acceptable = [rank_orig] if meg == 'separate' else [expected]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        acceptable.append(acceptable[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    assert _estimate_rank_raw(raw) in acceptable
    tol = 'float32'  # temporary option until we can fix things
    assert _estimate_rank_raw(raw, tol=tol) in acceptable
    assert _estimate_rank_raw(raw, scalings={}, tol=tol) in acceptable
    assert _compute_rank_int(raw, None, scalings=dict(grad=1e13, mag=1e15),
                             tol=tol, verbose='debug') in acceptable
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        acceptable.append(acceptable[0] - 2)
    assert _compute_rank_int(raw, None, tol=tol,
                             verbose='debug') in acceptable
    del acceptable
    assert _compute_rank_int(raw, 'info') == expected
    assert_array_equal(raw[:][0], before)
def test_raw_rank_estimation(fname, ref_meg, scalings):
    """Test raw rank estimation."""
    if ref_meg and scalings != 'norm':
        # Adjust for CTF data (scale factors are quite different)
        scalings = dict(mag=1e31, grad=1e11)
    raw = read_raw_fif(fname)
    raw.crop(0, min(4., raw.times[-1])).load_data()
    by_type = _picks_by_type(raw.info, ref_meg=ref_meg, meg_combined=True)
    if 'eeg' in raw:
        (_, meg_picks), (_, eeg_picks) = by_type
    else:
        (_, meg_picks), = by_type
        eeg_picks = []
    n_meg, n_eeg = len(meg_picks), len(eeg_picks)
    # with SSS the MEG subspace rank comes from the processing history
    if raw.info['proc_history']:
        expected = _get_rank_sss(raw.info) + n_eeg
    else:
        expected = n_meg + n_eeg
    assert _estimate_rank_raw(
        raw, scalings=scalings, with_ref_meg=ref_meg) == expected
    if 'sss' in fname:
        raw.add_proj(compute_proj_raw(raw))
        raw.apply_proj()
    n_proj = len(raw.info['projs'])
    # applying projectors lowers the rank, except for SSS data
    want = expected if 'sss' in fname else expected - n_proj
    assert _estimate_rank_raw(
        raw, scalings=scalings, with_ref_meg=ref_meg) == want
def test_raw_rank_estimation(fname, ref_meg, scalings):
    """Test raw rank estimation."""
    if ref_meg and scalings != 'norm':
        # Adjust for CTF data (scale factors are quite different)
        scalings = dict(mag=1e31, grad=1e11)
    raw = read_raw_fif(fname)
    raw.crop(0, min(4., raw.times[-1])).load_data()
    picked = _picks_by_type(raw.info, ref_meg=ref_meg, meg_combined=True)
    has_eeg = 'eeg' in raw
    if not has_eeg:
        (_, picks_meg), = picked
        picks_eeg = []
    else:
        (_, picks_meg), (_, picks_eeg) = picked
    n_meg = len(picks_meg)
    n_eeg = len(picks_eeg)
    # SSS-processed data gets its MEG rank from the processing history
    expected_rank = (n_meg + n_eeg if len(raw.info['proc_history']) == 0
                     else _get_rank_sss(raw.info) + n_eeg)
    assert _estimate_rank_raw(
        raw, scalings=scalings, with_ref_meg=ref_meg) == expected_rank
    if has_eeg:
        with pytest.deprecated_call():
            assert raw.estimate_rank(picks=picks_eeg,
                                     scalings=scalings) == n_eeg
    if 'sss' in fname:
        raw.add_proj(compute_proj_raw(raw))
        raw.apply_proj()
    n_proj = len(raw.info['projs'])
    # projection removes dimensions, except on SSS data
    want_rank = expected_rank - (0 if 'sss' in fname else n_proj)
    assert _estimate_rank_raw(
        raw, scalings=scalings, with_ref_meg=ref_meg) == want_rank
def test_rank_estimation():
    """Test raw rank estimation."""
    # iterate (plain, SSS-processed) files against both scaling choices
    for fname in [fif_fname, hp_fif_fname]:  # second one is SSS
        for scalings in ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]:
            raw = read_raw_fif(fname).crop(0, 4.).load_data()
            (_, meg_picks), (_, eeg_picks) = _picks_by_type(
                raw.info, meg_combined=True)
            n_meg = len(meg_picks)
            n_eeg = len(eeg_picks)
            # SSS data takes its MEG rank from the processing history
            if raw.info['proc_history']:
                expected = _get_rank_sss(raw.info) + n_eeg
            else:
                expected = n_meg + n_eeg
            assert _estimate_rank_raw(raw, scalings=scalings) == expected
            with pytest.deprecated_call():
                assert raw.estimate_rank(
                    picks=eeg_picks, scalings=scalings) == n_eeg
            if 'sss' in fname:
                raw.add_proj(compute_proj_raw(raw))
                raw.apply_proj()
            n_proj = len(raw.info['projs'])
            # projectors drop the rank, except for SSS data
            want = expected - (0 if 'sss' in fname else n_proj)
            assert _estimate_rank_raw(raw, scalings=scalings) == want
def test_maxfilter_get_rank():
    """Test maxfilter rank lookup."""
    raw = read_raw_fif(hp_fif_fname)
    # the helper must agree with the free-dimension count stored by maxfilter
    sss_info = raw.info['proc_history'][0]['max_info']['sss_info']
    assert _get_rank_sss(raw) == sss_info['nfree']
def test_maxfilter_get_rank(n_proj, fname, rank_orig, meg):
    """Test maxfilter rank lookup."""
    raw = read_raw_fif(fname).crop(0, 5).load_data().pick_types()
    assert raw.info['projs'] == []
    proc = raw.info['proc_history'][0]['max_info']
    assert proc['sss_info']['nfree'] == rank_orig
    assert _get_rank_sss(raw) == rank_orig

    # projectors cost one dimension each (two when meg == 'separate')
    n_per_proj = 1 + (meg == 'separate')
    reduced_rank = rank_orig - n_per_proj * n_proj
    if n_proj > 0:
        # Let's do some projection
        raw.add_proj(compute_proj_raw(raw, n_mag=n_proj, n_grad=n_proj,
                                      meg=meg, verbose=True))
    raw.apply_proj()
    data_before = raw[:][0]

    # degenerate cases
    with pytest.raises(ValueError, match='tol must be'):
        _estimate_rank_raw(raw, tol='foo')
    with pytest.raises(TypeError, match='must be a string or a number'):
        _estimate_rank_raw(raw, tol=None)

    ok = [reduced_rank]
    if meg == 'separate':
        ok = [rank_orig]
    if fname == mf_fif_fname:
        # Here we permit a -1 because for mf_fif_fname we miss by 1, which is
        # probably acceptable. If we use the entire duration instead of 5 sec
        # this problem goes away, but the test is much slower.
        ok.append(ok[0] - 1)

    # multiple ways of hopefully getting the same thing
    # default tol=1e-4, scalings='norm'
    assert _estimate_rank_raw(raw) in ok
    tol = 'float32'  # temporary option until we can fix things
    for kwargs in (dict(tol=tol), dict(scalings=dict(), tol=tol)):
        assert _estimate_rank_raw(raw, **kwargs) in ok
    assert _compute_rank_int(raw, None, scalings=dict(grad=1e13, mag=1e15),
                             tol=tol, verbose='debug') in ok
    # XXX default scalings mis-estimate sometimes :(
    if fname == hp_fif_fname:
        ok.append(ok[0] - 2)
    assert _compute_rank_int(raw, None, tol=tol, verbose='debug') in ok
    del ok
    assert _compute_rank_int(raw, 'info') == reduced_rank
    assert_array_equal(raw[:][0], data_before)
def test_cov_rank_estimation(rank_method, proj, meg):
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names']
                if '053' not in ch and ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert cov['eig'][0] <= 1e-25  # avg projector should set this to zero
    assert (cov['eig'][1:] > 1e-16).all()  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)
    assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])
    raw_sss = read_raw_fif(hp_fif_fname)
    assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
    raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())
    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    info_sample = pick_info(raw_sample.info,
                            pick_types(raw_sample.info, meg=True, eeg=True))
    info_sss = pick_info(raw_sss.info,
                         pick_types(raw_sss.info, meg=True, eeg=True))
    picks_stack_sample = [
        ('eeg', pick_types(info_sample, meg=False, eeg=True)),
        ('meg', pick_types(info_sample, meg=True)),
        ('all', pick_types(info_sample, meg=True, eeg=True)),
    ]
    picks_stack_somato = [
        ('eeg', pick_types(info_sss, meg=False, eeg=True)),
        ('meg', pick_types(info_sss, meg=True)),
        ('all', pick_types(info_sss, meg=True, eeg=True)),
    ]
    cases = [
        (cov_sample, picks_stack_sample, info_sample),
        (cov_sample_proj, picks_stack_sample, info_sample),
        (cov_sss, picks_stack_somato, info_sss),  # sss
        (cov_sss_proj, picks_stack_somato, info_sss),
    ]
    scalings = dict(mag=1e15, grad=1e13, eeg=1e6)
    for cov, picks_list, iter_info in cases:
        rank = compute_rank(cov, rank_method, scalings, iter_info, proj=proj)
        rank['all'] = sum(rank.values())
        for ch_type, picks in picks_list:
            this_info = pick_info(iter_info, picks)
            this_names = set(this_info['ch_names'])

            # compute subset of projs, active and inactive
            n_projs_applied = sum(
                1 for p in cov['projs']
                if p['active'] and set(p['data']['col_names']) & this_names)
            n_projs_info = sum(
                1 for p in this_info['projs']
                if set(p['data']['col_names']) & this_names)

            # count channel types
            ch_types = _get_channel_types(this_info)
            n_eeg = ch_types.count('eeg')
            n_meg = ch_types.count('mag') + ch_types.count('grad')
            has_sss = n_meg > 0 and len(this_info['proc_history']) > 0
            if has_sss:
                n_meg = _get_rank_sss(this_info)

            expected_rank = n_meg + n_eeg
            if rank_method is None:
                if meg == 'combined' or not has_sss:
                    if proj:
                        expected_rank -= n_projs_info
                    else:
                        expected_rank -= n_projs_applied
            else:
                # XXX for now it just uses the total count
                assert rank_method == 'info'
                if proj:
                    expected_rank -= n_projs_info
            assert rank[ch_type] == expected_rank
def test_cov_rank_estimation(rank_method, proj, meg):
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names']
                if '053' not in ch and ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert cov['eig'][0] <= 1e-25  # avg projector should set this to zero
    assert (cov['eig'][1:] > 1e-16).all()  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)
    assert not _has_eeg_average_ref_proj(raw_sample.info['projs'])
    raw_sss = read_raw_fif(hp_fif_fname)
    assert not _has_eeg_average_ref_proj(raw_sss.info['projs'])
    raw_sss.add_proj(compute_proj_raw(raw_sss, meg=meg))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())
    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
    info_sample = pick_info(raw_sample.info, all_sample)
    info_sss = pick_info(raw_sss.info, all_sss)
    picks_stack_sample = [
        ('eeg', pick_types(info_sample, meg=False, eeg=True)),
        ('meg', pick_types(info_sample, meg=True)),
        ('all', pick_types(info_sample, meg=True, eeg=True)),
    ]
    picks_stack_somato = [
        ('eeg', pick_types(info_sss, meg=False, eeg=True)),
        ('meg', pick_types(info_sss, meg=True)),
        ('all', pick_types(info_sss, meg=True, eeg=True)),
    ]
    scalings = dict(mag=1e15, grad=1e13, eeg=1e6)
    for cov, picks_list, iter_info in [
            (cov_sample, picks_stack_sample, info_sample),
            (cov_sample_proj, picks_stack_sample, info_sample),
            (cov_sss, picks_stack_somato, info_sss),  # sss
            (cov_sss_proj, picks_stack_somato, info_sss)]:
        rank = compute_rank(cov, rank_method, scalings, iter_info, proj=proj)
        rank['all'] = sum(rank.values())
        for ch_type, picks in picks_list:
            this_info = pick_info(iter_info, picks)
            names_here = set(this_info['ch_names'])

            # compute subset of projs, active and inactive
            n_projs_applied = sum(
                1 for p in cov['projs']
                if p['active'] and set(p['data']['col_names']) & names_here)
            n_projs_info = sum(
                1 for p in this_info['projs']
                if set(p['data']['col_names']) & names_here)

            # count channel types
            ch_types = [channel_type(this_info, k)
                        for k in range(len(picks))]
            n_eeg = ch_types.count('eeg')
            n_meg = ch_types.count('mag') + ch_types.count('grad')
            has_sss = n_meg > 0 and len(this_info['proc_history']) > 0
            if has_sss:
                n_meg = _get_rank_sss(this_info)

            expected_rank = n_meg + n_eeg
            if rank_method is None:
                if meg == 'combined' or not has_sss:
                    if proj:
                        expected_rank -= n_projs_info
                    else:
                        expected_rank -= n_projs_applied
            else:
                # XXX for now it just uses the total count
                assert rank_method == 'info'
                if proj:
                    expected_rank -= n_projs_info
            assert rank[ch_type] == expected_rank
def test_basic():
    """Test Maxwell filter basic version."""
    # Load testing data (raw, SSS std origin, SSS non-standard origin)
    raw = read_crop(raw_fname, (0., 1.))
    raw_err = read_crop(raw_fname).apply_proj()
    raw_erm = read_crop(erm_fname)
    pytest.raises(RuntimeError, maxwell_filter, raw_err)
    pytest.raises(TypeError, maxwell_filter, 1.)  # not a raw
    pytest.raises(ValueError, maxwell_filter, raw, int_order=20)  # too many

    # Check number of bases computed correctly
    nbases = sum(order ** 2 + 2 * order for order in (int_order, ext_order))
    assert _get_n_moments([int_order, ext_order]).sum() == nbases

    # Test SSS computation at the standard head origin
    assert len(raw.info['projs']) == 12  # 11 MEG projs + 1 AVG EEG
    with use_coil_def(elekta_def_fname):
        raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
                                 bad_condition='ignore')
    assert len(raw_sss.info['projs']) == 1  # avg EEG
    assert raw_sss.info['projs'][0]['desc'] == 'Average EEG reference'
    assert_meg_snr(raw_sss, read_crop(sss_std_fname), 200., 1000.)
    # cal / cross-talk / tSSS records must all be empty
    max_info = raw_sss.info['proc_history'][0]['max_info']
    for key in ('sss_cal', 'sss_ctc', 'max_st'):
        assert len(max_info[key]) == 0
    pytest.raises(RuntimeError, maxwell_filter, raw_sss)

    # Test SSS computation at non-standard head origin
    with use_coil_def(elekta_def_fname):
        raw_sss = maxwell_filter(raw, origin=[0., 0.02, 0.02],
                                 regularize=None, bad_condition='ignore')
    assert_meg_snr(raw_sss, read_crop(sss_nonstd_fname), 250., 700.)

    # Test SSS computation at device origin
    sss_erm_std = read_crop(sss_erm_std_fname)
    raw_sss = maxwell_filter(raw_erm, coord_frame='meg',
                             origin=mf_meg_origin, regularize=None,
                             bad_condition='ignore')
    assert_meg_snr(raw_sss, sss_erm_std, 70., 260.)
    for key in ('job', 'frame'):
        vals = [r.info['proc_history'][0]['max_info']['sss_info'][key]
                for r in (raw_sss, sss_erm_std)]
        assert vals[0] == vals[1]

    # Two equivalent things: at device origin in device coords (0., 0., 0.)
    # and at device origin at head coords info['dev_head_t'][:3, 3]
    raw_sss_meg = maxwell_filter(raw, coord_frame='meg', origin=(0., 0., 0.))
    raw_sss_head = maxwell_filter(
        raw, origin=raw.info['dev_head_t']['trans'][:3, 3])
    assert_meg_snr(raw_sss_meg, raw_sss_head, 100., 900.)

    # Check against SSS functions from proc_history
    assert _get_n_moments(int_order) == _get_rank_sss(raw_sss)

    # Degenerate cases
    for bad_kwargs in (dict(coord_frame='foo'), dict(origin='foo'),
                       dict(origin=[0] * 4), dict(mag_scale='foo')):
        pytest.raises(ValueError, maxwell_filter, raw, **bad_kwargs)
    raw_missing = raw.copy().load_data()
    raw_missing.info['bads'] = ['MEG0111']
    raw_missing.pick_types(meg=True)  # will be missing the bad
    maxwell_filter(raw_missing)
    with pytest.warns(RuntimeWarning, match='not in data'):
        maxwell_filter(raw_missing, calibration=fine_cal_fname)