def test_corr():
    "Test testnd.corr()"
    ds = datasets.get_uts(True)

    # add correlation
    Y = ds['Y']
    utsnd = ds['utsnd']
    utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]

    res = testnd.corr('utsnd', 'Y', ds=ds, samples=0)
    assert repr(res) == "<corr 'utsnd', 'Y', samples=0>"
    for s, t in product('01234', (0.1, 0.2, 0.35)):
        target = test.Correlation(utsnd.sub(sensor=s, time=t), Y).r
        assert res.r.sub(sensor=s, time=t) == pytest.approx(target)
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds, samples=0)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, pmin=0.05)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, tfce=True)
    repr(res)

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    assert_dataobj_equal(res.p, res_.p)

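# A minimal numpy-only sketch (not the eelbrain API; names are hypothetical)
# of what the pointwise correlation map above computes: Pearson r between a
# per-case covariate and the data at each (sensor, time) point.
def _sketch_pointwise_corr():
    import numpy as np
    rng = np.random.RandomState(0)
    y = rng.normal(0, 1, (60, 5, 100))  # cases x sensors x time
    covariate = rng.normal(0, 1, 60)    # one value per case
    yc = y - y.mean(0)                  # center along cases
    cc = covariate - covariate.mean()
    r = (yc * cc[:, None, None]).sum(0) / (
        np.sqrt((yc ** 2).sum(0)) * np.sqrt((cc ** 2).sum()))
    # each entry of r equals np.corrcoef at that (sensor, time) point
    assert np.allclose(r[0, 0], np.corrcoef(y[:, 0, 0], covariate)[0, 1])
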
def test_t_contrast():
    ds = datasets.get_uts()

    # simple contrast
    res = testnd.t_contrast_rel('uts', 'A', 'a1>a0', 'rm', ds=ds, samples=10, pmin=0.05)
    assert repr(res) == "<t_contrast_rel 'uts', 'A', 'a1>a0', match='rm', samples=10, pmin=0.05, 7 clusters, p < .001>"
    res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_array_equal(res.t.x, res_.t.x)

    # complex contrast
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a0|b0>a1|b0, a0|b1>a1|b1)', 'rm', ds=ds, samples=10, pmin=0.05)
    res_b0 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b0'), ('a1', 'b0'), 'rm', ds=ds)
    res_b1 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b1'), ('a1', 'b1'), 'rm', ds=ds)
    assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p, res_.p)

    # contrast with "*"
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm', ds=ds, tail=1, samples=0)

    # zero variance
    ds['uts'].x[:, 10] = 0.
    with pytest.raises(ZeroVariance):
        testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm', tail=1, ds=ds, samples=0)

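# The 'min(...)' contrast verified above reduces to an element-wise minimum
# of the component t-maps. A numpy-only illustration (hypothetical arrays):
def _sketch_min_contrast():
    import numpy as np
    t_b0 = np.array([1.5, -0.2, 2.0])  # t-map for a0 > a1 within b0
    t_b1 = np.array([0.8, 0.3, 2.5])   # t-map for a0 > a1 within b1
    assert np.array_equal(np.min([t_b0, t_b1], axis=0), np.minimum(t_b0, t_b1))
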
def test_concatenate():
    """Test concatenate()

    Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
    """
    ds = datasets.get_uts(True)

    v0 = ds[0, 'utsnd']
    v1 = ds[1, 'utsnd']
    vc = concatenate((v1, v0))
    assert_array_equal(vc.sub(time=(0, 1)).x, v1.x)
    assert_array_equal(vc.sub(time=(1, 2)).x, v0.x)
    assert_array_equal(vc.info, ds['utsnd'].info)

    # scalar
    psd = psd_welch(ds['utsnd'], n_fft=100)
    v0 = psd.sub(frequency=(None, 5))
    v1 = psd.sub(frequency=(45, None))
    conc = concatenate((v0, v1), 'frequency')
    assert_array_equal(conc.frequency.values[:5], psd.frequency.values[:5])
    assert_array_equal(conc.frequency.values[5:], psd.frequency.values[45:])
    conc_data = conc.get_data(v1.dimnames)
    assert_array_equal(conc_data[:, :, 5:], v1.x)

    # cat
    x = get_ndvar(2, frequency=0, cat=4)
    x_re = concatenate([x.sub(cat=(None, 'c')), x.sub(cat=('c', None))], 'cat')
    assert_dataobj_equal(x_re, x)

def test_ncrf():
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')

    # 1 stimulus
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10)
    # check residual
    assert model.residual == pytest.approx(172.714, 0.001)
    # check scaling
    stim_baseline = stim.mean()
    assert model._stim_baseline[0] == stim_baseline
    assert model._stim_scaling[0] == (stim - stim_baseline).abs().mean()
    assert model.h.norm('time').norm('source').norm('space') == pytest.approx(6.043e-10, rel=0.001)

    # test persistence
    model_2 = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(model_2.h, model.h)
    assert_dataobj_equal(model_2.h_scaled, model.h_scaled)
    assert model_2.residual == model.residual

    # 2 stimuli, one of them 2-d, normalize='l2'
    diff = stim.diff('time')
    stim2 = concatenate([diff.clip(0), diff.clip(max=0)], Categorial('rep', ['on', 'off']))
    model = fit_ncrf(meg, [stim, stim2], fwd, emptyroom, tstop=0.2, normalize='l2', mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10)
    # check scaling
    assert model._stim_baseline[0] == stim.mean()
    assert model._stim_scaling[0] == stim.std()
    assert model.h[0].norm('time').norm('source').norm('space') == pytest.approx(4.732e-10, 0.001)

    # cross-validation
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu='auto', n_iter=1, n_iterc=2, n_iterf=2, n_workers=1)
    assert model.mu == pytest.approx(0.0203, 0.001)
    model.cv_info()

def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar('uts', ds=ds)
    assert 'time' in lds
    assert isinstance(lds['time'], Var)
    assert_array_equal(np.unique(lds['time'].x), ds['uts'].time)

    # no ds
    lds2 = table.melt_ndvar(ds['uts'])
    assert_dataobj_equal(lds2['uts'], lds['uts'])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))", ds=ds, varname='summary')
    assert set(lds['sensor'].cells) == set(ds['utsnd'].sensor.names)

    # NDVar out
    lds = table.melt_ndvar("utsnd", 'sensor', ds=ds)
    assert 'utsnd' in lds
    assert isinstance(lds['utsnd'], NDVar)
    assert_dataobj_equal(lds[:ds.n_cases, 'utsnd'], ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    with pytest.raises(ValueError):
        table.melt_ndvar('utsnd', ds=ds)

def test_pickle():
    ds = datasets.get_uts()
    decimal = None if IS_OSX else 15
    ds_2 = load.unpickle(file_path('uts-py2.pickle'))
    assert_dataobj_equal(ds_2, ds, decimal)
    ds_3 = load.unpickle(file_path('uts-py3.pickle'))
    assert_dataobj_equal(ds_3, ds, decimal)

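# The persistence checks throughout this suite follow the same stdlib
# pattern: serialize with the highest pickle protocol and compare the
# round-tripped object. A self-contained sketch with a plain dict:
def _sketch_pickle_roundtrip():
    import pickle
    obj = {'name': 'uts', 'values': [1.0, 2.0, 3.0]}
    string = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    assert pickle.loads(string) == obj
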
def test_sample_sessions():
    set_log_level('warning', 'mne')
    SampleExperiment = import_attr(sample_path / 'sample_experiment_sessions.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 2, 1, 2)

    class Experiment(SampleExperiment):
        raw = {
            'ica': RawICA('raw', ('sample1', 'sample2'), 'fastica', max_iter=1),
            **SampleExperiment.raw,
        }

    root = join(tempdir, 'SampleExperiment')
    e = Experiment(root)

    # bad channels
    e.make_bad_channels('0111')
    assert e.load_bad_channels() == ['MEG 0111']
    assert e.load_bad_channels(session='sample2') == []
    e.show_bad_channels()
    e.merge_bad_channels()
    assert e.load_bad_channels(session='sample2') == ['MEG 0111']
    e.show_bad_channels()

    # rejection
    for _ in e:
        for epoch in ('target1', 'target2'):
            e.set(epoch=epoch)
            e.make_epoch_selection(auto=2e-12)

    ds = e.load_evoked('R0000', epoch='target2')
    e.set(session='sample1')
    ds2 = e.load_evoked('R0000')
    assert_dataobj_equal(ds2, ds)

    # super-epoch
    ds1 = e.load_epochs(epoch='target1')
    ds2 = e.load_epochs(epoch='target2')
    ds_super = e.load_epochs(epoch='super')
    assert_dataobj_equal(ds_super['meg'], combine((ds1['meg'], ds2['meg'])))

    # conflicting session and epoch settings
    rej_path = join(root, 'meg', 'R0000', 'epoch selection', 'sample2_1-40_target2-man.pickled')
    e.set(epoch='target2', raw='1-40')
    assert not exists(rej_path)
    e.set(session='sample1')
    e.make_epoch_selection(auto=2e-12)
    assert exists(rej_path)

    # ica
    e.set('R0000', raw='ica')
    with catch_warnings():
        filterwarnings('ignore', "FastICA did not converge", UserWarning)
        assert e.make_ica() == join(root, 'meg', 'R0000', 'R0000 ica-ica.fif')

def test_dot():
    ds = datasets.get_uts(True)

    # x subset of y
    index = ['3', '2']
    utsnd = ds['utsnd']
    topo = utsnd.mean(('case', 'time'))
    y1 = topo.sub(sensor=index).dot(utsnd.sub(sensor=index))
    assert_dataobj_equal(topo[index].dot(utsnd), y1)
    assert_dataobj_equal(topo.dot(utsnd.sub(sensor=index)), y1)

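# NDVar.dot() as exercised above contracts the shared sensor dimension, i.e.
# it projects the data onto a topography. A numpy-only equivalent with
# hypothetical shapes:
def _sketch_sensor_dot():
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.normal(0, 1, (60, 5, 100))  # cases x sensors x time
    topo = data.mean((0, 2))                # mean topography over sensors
    projected = np.tensordot(topo, data, axes=(0, 1))  # -> cases x time
    assert projected.shape == (60, 100)
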
def test_sample_source():
    set_log_level('warning', 'mne')
    SampleExperiment = import_attr(sample_path / 'sample_experiment.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 3, 2, mris=True)
    # TODO: use sample MRI which already has forward solution
    root = join(tempdir, 'SampleExperiment')
    e = SampleExperiment(root)

    # source space tests
    e.set(src='ico-4', rej='', epoch='auditory')
    # These two tests are only identical if the evoked has been cached before the first test is loaded
    ds = e.load_evoked(-1, model='side')
    resp = e.load_test('left=right', 0.05, 0.2, 0.05, samples=100, parc='ac', make=True)
    resm = e.load_test('left=right', 0.05, 0.2, 0.05, samples=100, mask='ac', make=True)
    assert_dataobj_equal(resp.t, resm.t)

    # ROI tests
    e.set(epoch='target')
    ress = e.load_test('left=right', 0.05, 0.2, 0.05, samples=100, data='source.rms', parc='ac', make=True)
    res = ress.res['ac-lh']
    assert res.p.min() == pytest.approx(0.429, abs=.001)
    ress = e.load_test('twostage', 0.05, 0.2, 0.05, samples=100, data='source.rms', parc='ac', make=True)
    res = ress.res['ac-lh']

def test_tsv_io():
    """Test tsv I/O"""
    tempdir = TempDir()
    names = ['A', 'B', 'rm', 'intvar', 'fltvar', 'fltvar2', 'index']
    ds = datasets.get_uv()
    ds['fltvar'][5:10] = np.nan
    ds[:4, 'rm'] = ''

    # save and load
    dst = Path(tempdir) / 'ds.txt'
    ds.save_txt(dst)
    ds1 = load.tsv(dst, random='rm')
    assert_dataset_equal(ds1, ds, decimal=10)
    ds1 = load.tsv(dst, skiprows=1, names=names, random='rm')
    assert_dataset_equal(ds1, ds, decimal=10)

    # delimiter
    for delimiter in [' ', ',']:
        ds.save_txt(dst, delimiter=delimiter)
        ds1 = load.tsv(dst, delimiter=delimiter, random='rm')
        assert_dataset_equal(ds1, ds, decimal=10)

    # guess data types with missing
    intvar2 = ds['intvar'].as_factor()
    intvar2[10:] = ''
    ds_intvar = Dataset((intvar2,))
    ds_intvar.save_txt(dst)
    ds_intvar1 = load.tsv(dst, empty='nan')
    assert_dataobj_equal(ds_intvar1['intvar', :10], ds['intvar', :10])
    assert_array_equal(ds_intvar1['intvar', 10:], np.nan)

    # str with space
    ds[:5, 'A'] = 'a 1'
    ds.save_txt(dst)
    ds1 = load.tsv(dst, random='rm')
    assert_dataset_equal(ds1, ds, decimal=10)
    ds.save_txt(dst, delimiter=' ')
    ds1 = load.tsv(dst, delimiter=' ', random='rm')
    assert_dataset_equal(ds1, ds, decimal=10)

    # fixed column width
    path = file_path('fox-prestige')
    ds = load.tsv(path, delimiter=' ', skipinitialspace=True)
    assert ds[1] == {'id': 'GENERAL.MANAGERS', 'education': 12.26, 'income': 25879, 'women': 4.02, 'prestige': 69.1, 'census': 1130, 'type': 'prof'}

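# The delimiter round-trips above boil down to writing and re-reading a
# delimited text file. A stdlib-only sketch using csv (hypothetical columns):
def _sketch_delimited_roundtrip():
    import csv
    import os
    import tempfile
    rows = [['A', 'intvar'], ['a1', '1'], ['a2', '2']]
    path = os.path.join(tempfile.mkdtemp(), 'ds.txt')
    for delimiter in ['\t', ' ', ',']:
        with open(path, 'w', newline='') as fid:
            csv.writer(fid, delimiter=delimiter).writerows(rows)
        with open(path, newline='') as fid:
            assert list(csv.reader(fid, delimiter=delimiter)) == rows
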
def test_load_fiff_from_raw():
    "Test loading data from a fiff raw file"
    data_path = mne.datasets.sample.data_path()
    meg_path = os.path.join(data_path, 'MEG', 'sample')
    raw_path = os.path.join(meg_path, 'sample_audvis_filt-0-40_raw.fif')
    evt_path = os.path.join(meg_path, 'sample_audvis_filt-0-40_raw-eve.fif')

    # load events
    ds = load.fiff.events(raw_path)
    assert ds['i_start'].x.dtype.kind == 'i'
    # compare with mne
    ds_evt = load.fiff.events(events=evt_path)
    ds = ds[np.arange(ds.n_cases) != 289]  # mne is missing an event
    assert_dataobj_equal(ds, ds_evt, name=False)

    # add epochs as ndvar
    ds = ds.sub('trigger == 32')
    with catch_warnings():
        filterwarnings('ignore', message=FILTER_WARNING)
        ds_ndvar = load.fiff.add_epochs(ds, -0.1, 0.3, decim=10, data='mag', proj=False, reject=2e-12)
    meg = ds_ndvar['meg']
    assert meg.ndim == 3
    data = meg.get_data(('case', 'sensor', 'time'))

    # compare with mne epochs
    with catch_warnings():
        filterwarnings('ignore', message=FILTER_WARNING)
        ds_mne = load.fiff.add_mne_epochs(ds, -0.1, 0.3, decim=10, proj=False, reject={'mag': 2e-12})
    epochs = ds_mne['epochs']
    # events
    assert_array_equal(epochs.events[:, 1], 0)
    assert_array_equal(epochs.events[:, 2], 32)
    # data
    picks = pick_types(epochs.info, meg='mag')
    mne_data = epochs.get_data()[:, picks]
    assert_array_equal(meg.sensor.names, [epochs.info['ch_names'][i] for i in picks])
    assert_array_equal(data, mne_data)
    assert_array_almost_equal(meg.time, epochs.times)

    # with proj
    with catch_warnings():
        filterwarnings('ignore', message=FILTER_WARNING)
        meg = load.fiff.epochs(ds, -0.1, 0.3, decim=10, data='mag', proj=True, reject=2e-12)
        epochs = load.fiff.mne_epochs(ds, -0.1, 0.3, decim=10, proj=True, reject={'mag': 2e-12})
    picks = pick_types(epochs.info, meg='mag')
    mne_data = epochs.get_data()[:, picks]
    assert_array_almost_equal(meg.x, mne_data, 10)

def test_boosting_epochs():
    """Test boosting with epoched data"""
    ds = datasets.get_uts(True, vector3d=True)
    p1 = epoch_impulse_predictor('uts', 'A=="a1"', name='a1', ds=ds)
    p0 = epoch_impulse_predictor('uts', 'A=="a0"', name='a0', ds=ds)
    p1 = p1.smooth('time', .05, 'hamming')
    p0 = p0.smooth('time', .05, 'hamming')

    # 1d
    for tstart, basis in product((-0.1, 0.1, 0), (0, 0.05)):
        print(f"tstart={tstart}, basis={basis}")
        res = boosting('uts', [p0, p1], tstart, 0.6, model='A', ds=ds, basis=basis, partitions=10, debug=True)
        y = convolve(res.h_scaled, [p0, p1])
        assert correlation_coefficient(y, res.y_pred) > .999
        r = correlation_coefficient(y, ds['uts'])
        assert res.r == approx(r, abs=1e-3)
        assert res.partitions == 10

    # prefit
    res1 = boosting('uts', p1, 0, 0.6, model='A', ds=ds, partitions=10)
    res0 = boosting('uts', p0, 0, 0.6, model='A', ds=ds, partitions=10)
    res01 = boosting('uts', [p0, p1], 0, 0.6, model='A', ds=ds, partitions=10, prefit=res1)

    # 2d
    res = boosting('utsnd', [p0, p1], 0, 0.6, model='A', ds=ds, partitions=10)
    assert len(res.h) == 2
    assert res.h[0].shape == (5, 60)
    assert res.h[1].shape == (5, 60)
    y = convolve(res.h_scaled, [p0, p1])
    r = correlation_coefficient(y, ds['utsnd'], ('case', 'time'))
    assert_dataobj_equal(res.r, r, decimal=3, name=False)

    # vector
    res = boosting('v3d', [p0, p1], 0, 0.6, error='l1', model='A', ds=ds, partitions=10)
    assert res.residual.ndim == 0

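# The prediction checks above rest on the identity y_pred = h * x: the
# response is the FIR convolution of the kernel with the predictor. A
# numpy-only sketch of that forward model (hypothetical kernel and stimulus):
def _sketch_fir_prediction():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.normal(0, 1, 1000)      # predictor time series
    h = np.array([0.5, 0.3, 0.1])   # causal FIR kernel (tstart=0)
    y_pred = np.convolve(x, h)[:len(x)]
    # at the first sample only h[0] * x[0] has entered the convolution
    assert np.isclose(y_pred[0], h[0] * x[0])
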
def test_test_experiment(root_dir):
    "Test event labeling with the EventExperiment subclass of MneExperiment"
    e = EventExperiment()

    # test defaults
    assert e.get('session') == 'cheese'
    assert e.get('model') == 'name'

    # test event labeling
    ds = e._label_events(gen_triggers())
    name = Factor([e.variables['name'][t] for t in TRIGGERS], name='name')
    assert_dataobj_equal(ds['name'], name)
    tgt = ds['trigger'].as_factor(e.variables['backorder'], 'backorder')
    assert_dataobj_equal(ds['backorder'], tgt)
    tgt = ds['trigger'].as_factor(e.variables['taste'], 'taste')
    assert_dataobj_equal(ds['taste'], tgt)
    assert_array_equal(ds['i_start'], I_START)
    assert_array_equal(ds['subject'] == SUBJECT, True)

    # epochs
    assert e._epochs['cheese'].tmin == -0.2
    assert e._epochs['cheese-leicester'].tmin == -0.1
    assert e._epochs['cheese-tilsit'].tmin == -0.2

    # tests
    e = EventExperiment(root_dir)
    # add test
    EventExperiment.tests['aov'] = ANOVA('backorder * taste * subject')
    e = EventExperiment(root_dir)
    e.set(test='aov')
    assert e.get('model') == 'backorder%taste'
    # remove test
    del EventExperiment.tests['aov']
    e = EventExperiment(root_dir)

def test_result():
    "Test boosting results"
    ds = datasets._get_continuous()
    x1 = ds['x1']

    # convolve function
    y = convolve([ds['h1'], ds['h2']], [ds['x1'], ds['x2']])
    assert_dataobj_equal(y, ds['y'], name=False)

    # test prediction with res.h and res.h_scaled
    res = boosting(ds['y'], ds['x1'], 0, 1)
    y1 = convolve(res.h_scaled, ds['x1'])
    x_scaled = ds['x1'] / res.x_scale
    y2 = convolve(res.h, x_scaled)
    y2 *= res.y_scale
    y2 += y1.mean() - y2.mean()  # mean can't be reconstructed
    assert_dataobj_equal(y1, y2, decimal=12)

    # reconstruction
    res = boosting(x1, y, -1, 0, debug=True)
    x1r = convolve(res.h_scaled, y)
    assert correlation_coefficient(res.y_pred, x1r) > .999
    assert correlation_coefficient(x1r[0.9:], x1[0.9:]) == approx(res.r, abs=1e-3)

    # test NaN checks (modifies data)
    ds['x2'].x[1, 50] = np.nan
    with pytest.raises(ValueError):
        boosting(ds['y'], ds['x2'], 0, .5)
    with pytest.raises(ValueError):
        boosting(ds['y'], ds['x2'], 0, .5, False)
    ds['x2'].x[1, :] = 1
    with catch_warnings():
        filterwarnings('ignore', category=RuntimeWarning)
        with pytest.raises(ValueError):
            boosting(ds['y'], ds['x2'], 0, .5)
    ds['y'].x[50] = np.nan
    with pytest.raises(ValueError):
        boosting(ds['y'], ds['x1'], 0, .5)
    with pytest.raises(ValueError):
        boosting(ds['y'], ds['x1'], 0, .5, False)

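# The h vs. h_scaled comparison above uses the linearity of convolution:
# convolving a rescaled predictor with the normalized kernel and rescaling
# the output reproduces the prediction from the scaled kernel (up to the
# mean, which normalization discards). A numpy sketch:
def _sketch_convolution_linearity():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.normal(0, 1, 500)
    h = rng.normal(0, 1, 10)
    x_scale, y_scale = 2.0, 3.0
    y1 = np.convolve(x, h * y_scale / x_scale)
    y2 = np.convolve(x / x_scale, h) * y_scale
    assert np.allclose(y1, y2)
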
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, samples=0)
    assert repr(res) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>"
    assert res.p_uncorrected.min() < 0.05

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>"
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # alternate argspec
    res_ = testnd.ttest_ind("uts[A == 'a1']", "uts[A == 'a0']", ds=ds, samples=0)
    assert repr(res_) == "<ttest_ind 'uts' (n=30), 'uts' (n=30), samples=0>"
    assert_dataobj_equal(res_.t, res.t)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    assert res._cdist.n_clusters == 10

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res_zv = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, samples=0)
    assert_array_equal(res_zv.t.x[0], res.t.x[0])
    assert res_zv.t.x[1, 10] == 0.

    # argument mismatch
    with pytest.raises(ValueError):
        testnd.ttest_ind(ds['utsnd'], ds[:-1, 'A'], samples=0)

def test_clusterdist():
    "Test NDPermutationDistribution class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=bool)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = NDPermutationDistribution(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert cdist.n_clusters == 1
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0], cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert cdist.parameter_map.dims == y.dims[1:]

    # test connecting many sensors
    logging.info("TEST: connecting many sensors")
    bin_map = np.zeros(shape[1:], dtype=bool)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = NDPermutationDistribution(y, 0, 1.5)
    cdist.add_original(pmap)
    assert cdist.n_clusters == 1
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0], cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=bool)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = NDPermutationDistribution(y, 1, 1.5)
    cdist.add_original(pmap)
    assert cdist.n_clusters == 2

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert res.clusters['duration'].min() < 0.01
    assert res.clusters['n_sensors'].min() == 1
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05, mintime=0.02, minsensor=2)
    assert res.clusters['duration'].min() >= 0.02
    assert res.clusters['n_sensors'].min() == 2

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    time = UTS(-0.1, 0.1, 4)
    scalar = Scalar('scalar', range(10), 'unit')
    dims = ('case', time, sensor, scalar)
    rng = np.random.RandomState(0)
    y = NDVar(rng.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = NDPermutationDistribution(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert cdist.dist.shape == (3,)

    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert repr(cdist_) == repr(cdist)

    # find peaks
    x = np.array([
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
         [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
         [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = find_peaks(x, cdist._connectivity)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    # testnd permutation result
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    if sys.version_info[0] == 3:
        target = [96.84232967, 205.83207424, 425.65942084]
    else:
        target = [77.5852307, 119.1976153, 217.6270428]
    assert_allclose(np.sort(res._cdist.dist), target)

    # parc with TFCE on unconnected dimension
    configure(False)
    x = rng.normal(0, 1, (10, 5, 2, 4))
    time = UTS(-0.1, 0.1, 5)
    categorial = Categorial('categorial', ('a', 'b'))
    y = NDVar(x, ('case', time, categorial, sensor))
    y0 = NDVar(x[:, :, 0], ('case', time, sensor))
    y1 = NDVar(x[:, :, 1], ('case', time, sensor))
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')
    res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)
    res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)
    # cdist
    assert res._cdist.shape == (4, 2, 5)
    # T-maps don't depend on connectivity
    assert_array_equal(res.t.x[:, 0], res0.t.x)
    assert_array_equal(res.t.x[:, 1], res1.t.x)
    assert_array_equal(res_parc.t.x[:, 0], res0.t.x)
    assert_array_equal(res_parc.t.x[:, 1], res1.t.x)
    # TFCE-maps should always be the same because they're unconnected
    assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)
    # Probability-maps should depend on what is taken into account
    p_a = res0.compute_probability_map().x
    p_b = res1.compute_probability_map().x
    assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)
    assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)
    p_parc = res_parc.compute_probability_map()
    assert_array_equal(p_parc.x, res.compute_probability_map().x)
    assert np.all(p_parc.sub(categorial='a').x >= p_a)
    assert np.all(p_parc.sub(categorial='b').x >= p_b)
    configure(True)

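# Cluster formation as tested above amounts to thresholding a statistical map
# and labeling connected suprathreshold regions. A scipy.ndimage sketch on a
# toy 2-d map (the threshold and arrays are hypothetical):
def _sketch_cluster_labeling():
    import numpy as np
    from scipy import ndimage
    pmap = np.zeros((6, 6))
    pmap[:3, :3] = 2.0   # one suprathreshold blob
    pmap[5, 5] = 2.0     # a second, disconnected blob
    labels, n = ndimage.label(pmap > 1.5)
    assert n == 2
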
def test_vector():
    """Test vector tests"""
    # single vector
    ds = datasets.get_uv(vector=True)
    res = testnd.Vector('v[:40]', ds=ds, samples=10)
    assert res.p == 0.0
    res = testnd.Vector('v[40:]', ds=ds, samples=10)
    assert res.p == 1.0

    # single vector with norm stat
    res_t = testnd.Vector('v[:40]', ds=ds, samples=10, norm=True)
    assert res_t.p == 0.0
    res_t = testnd.Vector('v[40:]', ds=ds, samples=10, norm=True)
    assert res_t.p == 1.0

    # non-space tests should raise error
    with pytest.raises(WrongDimension):
        testnd.ttest_1samp('v', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.ttest_rel('v', 'A', match='rm', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.ttest_ind('v', 'A', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.t_contrast_rel('v', 'A', 'a0 > a1', 'rm', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.corr('v', 'fltvar', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.anova('v', 'A * B', ds=ds)

    # vector in time
    ds = datasets.get_uts(vector3d=True)
    v1 = ds[30:, 'v3d']
    v2 = ds[:30, 'v3d']
    vd = v1 - v2
    res = testnd.Vector(vd, samples=10)
    assert res.p.min() == 0.2
    difference = res.masked_difference(0.5)
    assert difference.x.mask.sum() == 288
    # diff related
    resd = testnd.VectorDifferenceRelated(v1, v2, samples=10)
    assert_dataobj_equal(resd.p, res.p, name=False)
    assert_dataobj_equal(resd.t2, res.t2, name=False)
    # diff independent
    res = VectorDifferenceIndependent(v1, v2, samples=10, norm=True)
    assert_dataobj_equal(res.difference, v1.mean('case') - v2.mean('case'), name=False)
    assert res.p.max() == 1
    assert res.p.min() == 0
    # with mp
    res = testnd.Vector(v1, samples=10)
    assert res.p.min() == 0.4
    # without mp
    configure(n_workers=0)
    res0 = testnd.Vector(v1, samples=10)
    assert_array_equal(np.sort(res0._cdist.dist), np.sort(res._cdist.dist))
    configure(n_workers=True)
    # time window
    res = testnd.Vector(v2, samples=10, tstart=0.1, tstop=0.4)
    assert res.p.min() == 0.3
    difference = res.masked_difference(0.5)
    assert difference.x.mask.sum() == 294

    # vector in time with norm stat
    res = testnd.Vector(vd, samples=10, norm=True)
    assert res.p.min() == 0
    difference = res.masked_difference()
    assert difference.x.mask.sum() == 297
    resd = testnd.VectorDifferenceRelated(v1, v2, samples=10, norm=True)
    assert_dataobj_equal(resd.p, res.p, name=False)
    assert_dataobj_equal(resd.difference, res.difference, name=False)

    v_small = v2 / 100
    res = testnd.Vector(v_small, tfce=True, samples=10, norm=True)
    assert 'WARNING' in repr(res)
    res = testnd.Vector(v_small, tfce=0.1, samples=10)
    assert res.p.min() == 0.0

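# A numpy-only sketch of the intuition behind the vector tests above: compare
# the norm of the mean vector against a null distribution from random sign
# flips of the per-case vectors. The flip-based null is a simplified stand-in
# for eelbrain's random-rotation scheme, not its actual algorithm:
def _sketch_vector_norm_permutation():
    import numpy as np
    rng = np.random.RandomState(0)
    v = rng.normal(1.0, 1, (30, 3))  # cases x space, with a real effect
    observed = np.linalg.norm(v.mean(0))
    null = [np.linalg.norm((v * rng.choice([-1, 1], (30, 1))).mean(0))
            for _ in range(100)]
    p = (np.sum(np.array(null) >= observed) + 1) / (100 + 1)
    assert p < 0.05
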
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert repr(res) == "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), 'rm' (n=15), samples=100, p < .001>"
    difference = res.masked_difference()
    assert difference.x.mask.sum() == 84
    c1 = res.masked_c1()
    assert c1.x.mask.sum() == 84
    assert_array_equal(c1.x.data, res.c1_mean.x)

    # alternate argspec
    res_ = testnd.ttest_rel("uts[A%B == ('a1', 'b1')]", "uts[A%B == ('a0', 'b0')]", ds=ds, samples=100)
    assert repr(res_) == "<ttest_rel 'uts', 'uts' (n=15), samples=100, p < .001>"
    assert_dataobj_equal(res_.t, res.t)

    # alternate argspec 2
    ds1 = Dataset()
    ds1['a1b1'] = ds.eval("uts[A%B == ('a1', 'b1')]")
    ds1['a0b0'] = ds.eval("uts[A%B == ('a0', 'b0')]")
    res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)
    assert_dataobj_equal(res1.t, res.t)
    assert repr(res1) == "<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p < .001>"

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert res2.p_uncorrected.min() < 0.05
    assert res2.n == res.n

    # reproducibility
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    configure(n_workers=0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    configure(n_workers=True)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    configure(n_workers=0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    tgt = res0.compute_probability_map()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)
    assert res.t.x[1, 10] == 0

    # argument length
    with pytest.raises(ValueError):
        testnd.ttest_rel('utsnd', 'A[:-1]', match='rm', ds=ds)
    with pytest.raises(ValueError):
        testnd.ttest_rel('utsnd', 'A', match='rm[:-1]', ds=ds)

def test_sample():
    set_log_level('warning', 'mne')
    # import from file: http://stackoverflow.com/a/67692/166700
    SampleExperiment = import_attr(sample_path / 'sample_experiment.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 3, 2)

    root = join(tempdir, 'SampleExperiment')
    e = SampleExperiment(root)

    assert e.get('raw') == '1-40'
    assert e.get('subject') == 'R0000'
    assert e.get('subject', subject='R0002') == 'R0002'

    # events
    e.set('R0001', rej='')
    ds = e.load_selected_events(epoch='target')
    assert ds.n_cases == 39
    ds = e.load_selected_events(epoch='auditory')
    assert ds.n_cases == 20
    ds = e.load_selected_events(epoch='av')
    assert ds.n_cases == 39

    # evoked cache invalidated by change in bads
    e.set('R0001', rej='', epoch='target')
    ds = e.load_evoked()
    assert ds[0, 'evoked'].info['bads'] == []
    e.make_bad_channels(['MEG 0331'])
    ds = e.load_evoked()
    assert ds[0, 'evoked'].info['bads'] == ['MEG 0331']

    e.set(rej='man', model='modality')
    sds = []
    for _ in e:
        e.make_epoch_selection(auto=2.5e-12)
        sds.append(e.load_evoked())

    ds = e.load_evoked('all')
    assert_dataobj_equal(combine(sds), ds)

    # sensor space tests
    megs = [e.load_evoked(cat='auditory')['meg'] for _ in e]
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.rms', baseline=False, make=True)
    meg_rms = combine(meg.rms('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_rms, decimal=21)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.mean', baseline=False, make=True)
    meg_mean = combine(meg.mean('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_mean, decimal=21)
    with pytest.raises(IOError):
        res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False, make=True)
    assert res.p.min() == pytest.approx(.143, abs=.001)
    assert res.difference.max() == pytest.approx(4.47e-13, 1e-15)

    # plot
    e.plot_evoked(1, epoch='target', model='')

    # e._report_subject_info() broke with non-alphabetic subject order
    subjects = e.get_field_values('subject')
    ds = Dataset()
    ds['subject'] = Factor(reversed(subjects))
    ds['n'] = Var(range(3))
    s_table = e._report_subject_info(ds, '')

    # post_baseline_trigger_shift
    # use multiple of tstep to shift by even number of samples
    tstep = 0.008324800548266162
    shift = -7 * tstep

    class Experiment(SampleExperiment):
        epochs = {
            **SampleExperiment.epochs,
            'visual-s': SecondaryEpoch('target', "modality == 'visual'", post_baseline_trigger_shift='shift', post_baseline_trigger_shift_max=0, post_baseline_trigger_shift_min=shift),
        }
        variables = {
            **SampleExperiment.variables,
            'shift': LabelVar('side', {'left': 0, 'right': shift}),
            'shift_t': LabelVar('trigger', {(1, 3): 0, (2, 4): shift}),
        }

    e = Experiment(root)
    # test shift in events
    ds = e.load_events()
    assert_dataobj_equal(ds['shift_t'], ds['shift'], name=False)
    # compare against epochs (baseline correction on epoch level rather than evoked for smaller numerical error)
    ep = e.load_epochs(baseline=True, epoch='visual', rej='').aggregate('side')
    evs = e.load_evoked(baseline=True, epoch='visual-s', rej='', model='side')
    tstart = ep['meg'].time.tmin - shift
    assert_dataobj_equal(evs[0, 'meg'], ep[0, 'meg'].sub(time=(tstart, None)), decimal=20)
    tstop = ep['meg'].time.tstop + shift
    assert_almost_equal(evs[1, 'meg'].x, ep[1, 'meg'].sub(time=(None, tstop)).x, decimal=20)

    # post_baseline_trigger_shift & multiple epochs with same time stamp
    class Experiment(SampleExperiment):
        epochs = {
            **SampleExperiment.epochs,
            'v1': {'base': 'visual', 'vars': {'shift': 'Var([0.0], repeat=len(side))'}},
            'v2': {'base': 'visual', 'vars': {'shift': 'Var([0.1], repeat=len(side))'}},
            'vc': {'sub_epochs': ('v1', 'v2'), 'post_baseline_trigger_shift': 'shift', 'post_baseline_trigger_shift_max': 0.1, 'post_baseline_trigger_shift_min': 0.0},
        }
        groups = {
            'group0': Group(['R0000']),
            'group1': SubGroup('all', ['R0000']),
        }
        variables = {
            'group': GroupVar(['group0', 'group1']),
            **SampleExperiment.variables,
        }

    e = Experiment(root)
    events = e.load_selected_events(epoch='vc')
    ds = e.load_epochs(baseline=True, epoch='vc')
    v1 = ds.sub("epoch=='v1'", 'meg').sub(time=(0, 0.199))
    v2 = ds.sub("epoch=='v2'", 'meg').sub(time=(-0.1, 0.099))
    assert_almost_equal(v1.x, v2.x, decimal=20)

    # duplicate subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0002', 'R0002')}

    with pytest.raises(DefinitionError):
        BadExperiment(root)

    # non-existing subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0003', 'R0002')}

    with pytest.raises(DefinitionError):
        BadExperiment(root)

    # unsorted subjects
    class Experiment(SampleExperiment):
        groups = {'group': ('R0002', 'R0000', 'R0001')}

    e = Experiment(root)
    assert [s for s in e] == ['R0000', 'R0001', 'R0002']

    # changes
    class Changed(SampleExperiment):
        variables = {
            'event': {(1, 2, 3, 4): 'target', 5: 'smiley', 32: 'button'},
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual'},
        }
        tests = {
            'twostage': TwoStageTest(
                'side_left + modality_a',
                {'side_left': "side == 'left'", 'modality_a': "modality == 'auditory'"}),
            'novars': TwoStageTest('side + modality'),
        }

    e = Changed(root)

    # changed variable, while a test with model=None is not changed
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }

    e = Changed(root)

    # changed variable, unchanged test with vardef=None
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }

    e = Changed(root)

    # ICA
    # ---
    class Experiment(SampleExperiment):
        raw = {
            'apply-ica': RawApplyICA('tsss', 'ica'),
            **SampleExperiment.raw,
        }

    e = Experiment(root)
    ica_path = e.make_ica(raw='ica')
    e.set(raw='ica1-40', model='')
    e.make_epoch_selection(auto=2e-12, overwrite=True)
    ds1 = e.load_evoked(raw='ica1-40')
    ica = e.load_ica(raw='ica')
    ica.exclude = [0, 1, 2]
    ica.save(ica_path)
    ds2 = e.load_evoked(raw='ica1-40')
    assert not np.allclose(ds1['meg'].x, ds2['meg'].x, atol=1e-20), "ICA change ignored"
    # apply-ICA
    with catch_warnings():
        filterwarnings('ignore', "The measurement information indicates a low-pass frequency", RuntimeWarning)
        ds1 = e.load_evoked(raw='ica', rej='')
        ds2 = e.load_evoked(raw='apply-ica', rej='')
    assert_dataobj_equal(ds2, ds1)

    # rename subject
    # --------------
    src = Path(e.get('raw-dir', subject='R0001'))
    dst = Path(e.get('raw-dir', subject='R0003', match=False))
    shutil.move(src, dst)
    for path in dst.glob('*.fif'):
        shutil.move(path, dst / path.parent / path.name.replace('R0001', 'R0003'))
    # check subject list
    e = SampleExperiment(root)
    assert list(e) == ['R0000', 'R0002', 'R0003']
    # check that cached test got deleted
    assert e.get('raw') == '1-40'
    with pytest.raises(IOError):
        e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False, make=True)
    assert res.df == 2
    assert res.p.min() == pytest.approx(.143, abs=.001)
    assert res.difference.max() == pytest.approx(4.47e-13, 1e-15)

    # remove subject
    # --------------
    shutil.rmtree(dst)
    # check cache
    e = SampleExperiment(root)
    assert list(e) == ['R0000', 'R0002']
    # check that cached test got deleted
    assert e.get('raw') == '1-40'
    with pytest.raises(IOError):
        e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)

def test_anova():
    "Test testnd.anova()"
    ds = datasets.get_uts(True, nrm=True)

    testnd.anova('utsnd', 'A*B', ds=ds)
    for samples in (0, 2):
        logging.info("TEST: samples=%r" % samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)
        res = testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)
        assert res._plot_model() == 'A%B'
        asfmtext(res)

    res = testnd.anova('utsnd', 'A*B*rm', match=False, ds=ds, samples=0, pmin=0.05)
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match=False, samples=0, pmin=0.05, 'A': 17 clusters, 'B': 20 clusters, 'A x B': 22 clusters>"
    assert res._plot_model() == 'A%B'
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=2, pmin=0.05)
    assert res.match == 'rm'
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match='rm', samples=2, pmin=0.05, 'A': 17 clusters, p < .001, 'B': 20 clusters, p < .001, 'A x B': 22 clusters, p < .001>"
    assert res._plot_model() == 'A%B'

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert res_._plot_model() == 'A%B'

    # threshold-free
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)
    assert res.match == 'rm'
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match='rm', samples=10, 'A': p < .001, 'B': p < .001, 'A x B': p < .001>"
    assert 'A clusters' in res.clusters.info
    assert 'B clusters' in res.clusters.info
    assert 'A x B clusters' in res.clusters.info

    # no clusters
    res = testnd.anova('uts', 'B', sub="A=='a1'", ds=ds, samples=5, pmin=0.05, mintime=0.02)
    repr(res)
    assert 'v' in res.clusters
    assert 'p' in res.clusters
    assert res._plot_model() == 'B'

    # all effects with clusters
    res = testnd.anova('uts', 'A*B*rm', match=False, ds=ds, samples=5, pmin=0.05, tstart=0.1, mintime=0.02)
    assert set(res.clusters['effect'].cells) == set(res.effects)

    # some effects with clusters, some without
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05, tstart=0.37, mintime=0.02)
    assert res.match == 'rm'
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_dataobj_equal(res.clusters, res_.clusters)

    # test multi-effect results (with persistence)
    # UTS
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)
    assert res.match == 'rm'
    repr(res)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    resr = pickle.loads(string)
    tf_clusters = resr.find_clusters(pmin=0.05)
    peaks = resr.find_peaks()
    assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))
    assert_dataobj_equal(peaks, res.find_peaks())
    assert tf_clusters.eval("p.min()") == peaks.eval("p.min()")
    unmasked = resr.f[0]
    masked = resr.masked_parameter_map(effect=0, pmin=0.05)
    assert_array_equal(masked.x <= unmasked.x, True)

    # reproducibility
    decimal = 12 if IS_WINDOWS else None  # FIXME: why is Windows sometimes different???
    res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)
    configure(n_workers=0)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)
    configure(n_workers=True)

    # permutation
    eelbrain._stats.permutation._YIELD_ORIGINAL = 1
    samples = 4
    # raw
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)
    for dist in res._cdist:
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, dist.parameter_map.abs().max())
    # TFCE
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    for dist in res._cdist:
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, dist.tfce_map.abs().max())
    # thresholded
    res1 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)
    clusters = res1.find_clusters()
    for dist, effect in zip(res1._cdist, res1.effects):
        effect_idx = clusters.eval("effect == %r" % effect)
        vmax = clusters[effect_idx, 'v'].abs().max()
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, vmax)
    eelbrain._stats.permutation._YIELD_ORIGINAL = 0

    # 1d TFCE
    configure(n_workers=0)
    res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    configure(n_workers=True)

    # zero variance
    res2 = testnd.anova('utsnd', 'A', ds=ds)
    ds['utsnd'].x[:, 1, 10] = 0.
    zero_var = ds['utsnd'].var('case') == 0
    zv_index = tuple(i[0] for i in zero_var.nonzero())
    res1_zv = testnd.anova('utsnd', 'A*B*rm', ds=ds)
    res2_zv = testnd.anova('utsnd', 'A', ds=ds)
    for res, res_zv in ((res1, res1_zv), (res2, res2_zv)):
        for f, f_zv in zip(res.f, res_zv.f):
            assert_array_equal((f_zv == 0).x, zero_var.x)
            assert f_zv[zv_index] == 0
            f_zv[zv_index] = f[zv_index]
            assert_dataobj_equal(f_zv, f, decimal=decimal)

    # nested random effect
    res = testnd.anova('uts', 'A * B * nrm(A)', ds=ds, samples=10, tstart=.4)
    assert res.match == 'nrm(A)'
    assert [p.min() for p in res.p] == [0.0, 0.6, 0.9]

    # unequal argument length
    with pytest.raises(ValueError):
        testnd.anova('uts', 'A[:-1]', ds=ds)
    with pytest.raises(ValueError):
        testnd.anova('uts[:-1]', 'A * B * nrm(A)', ds=ds)

def test_ttest_1samp():
    "Test testnd.ttest_1samp()"
    ds = datasets.get_uts(True)

    # no clusters
    res0 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds, samples=0)
    assert res0.p_uncorrected.min() < 0.05
    assert repr(res0) == "<ttest_1samp 'uts', sub=\"A == 'a0'\", samples=0>"

    # sub as array
    res1 = testnd.ttest_1samp('uts', sub=ds.eval("A == 'a0'"), ds=ds, samples=0)
    assert repr(res1) == "<ttest_1samp 'uts', sub=<array>, samples=0>"

    # clusters without resampling
    res1 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds, samples=0, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert res1.clusters.n_cases == 1
    assert 'p' not in res1.clusters
    assert repr(res1) == "<ttest_1samp 'uts', sub=\"A == 'a0'\", samples=0, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 1 clusters>"

    # persistence
    string = pickle.dumps(res1, pickle.HIGHEST_PROTOCOL)
    res1_ = pickle.loads(string)
    assert repr(res1_) == repr(res1)
    assert_dataobj_equal(res1.p_uncorrected, res1_.p_uncorrected)

    # clusters with resampling
    res2 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds, samples=10, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert res2.clusters.n_cases == 1
    assert res2.samples == 10
    assert 'p' in res2.clusters
    assert repr(res2) == "<ttest_1samp 'uts', sub=\"A == 'a0'\", samples=10, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 1 clusters, p < .001>"

    # clusters with permutations
    dss = ds.sub("logical_and(A=='a0', B=='b0')")[:8]
    res3 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=dss, samples=10000, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert repr(res3) == "<ttest_1samp 'uts', sub=\"A == 'a0'\", samples=255, pmin=0.05, tstart=0, tstop=0.6, mintime=0.05, 2 clusters, p = .020>"
    assert res3.clusters.n_cases == 2
    assert res3.samples == -1
    assert str(res3.clusters) == (
        'id   tstart   tstop   duration   v        p          sig\n'
        '--------------------------------------------------------\n'
        '3    0.08     0.34    0.26       95.692   0.015686   *  \n'
        '4    0.35     0.56    0.21       81.819   0.019608   *  ')

    # nd
    dss = ds.sub("A == 'a0'")
    res = testnd.ttest_1samp('utsnd', ds=dss, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, pmin=0.05, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, tfce=True, samples=1)

    # TFCE properties
    res = testnd.ttest_1samp('utsnd', sub="A == 'a0'", ds=ds, samples=1)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res = pickle.loads(string)
    tfce_clusters = res.find_clusters(pmin=0.05)
    peaks = res.find_peaks()
    assert tfce_clusters.eval("p.min()") == peaks.eval("p.min()")
    masked = res.masked_parameter_map(pmin=0.05)
    assert_array_equal(masked.abs().x <= res.t.abs().x, True)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    ds['utsnd'].x[:, 2, 10] = 0.1
    res = testnd.ttest_1samp('utsnd', ds=ds, samples=0)
    assert res.t.x[1, 10] == 0.
    assert res.t.x[2, 10] > 1e10

    # argument length
    with pytest.raises(ValueError):
        testnd.ttest_1samp('utsnd', sub="A[:-1] == 'a0'", ds=ds, samples=0)

def test_smoothing():
    x = get_ndvar(2)
    xt = NDVar(x.x.swapaxes(1, 2), [x.dims[i] for i in [0, 2, 1]], x.name, x.info)

    # smoothing across time
    ma = x.smooth('time', 0.2, 'blackman')
    assert_dataobj_equal(x.smooth('time', window='blackman', window_samples=20), ma)
    with pytest.raises(TypeError):
        x.smooth('time')
    with pytest.raises(TypeError):
        x.smooth('time', 0.2, 'blackman', window_samples=20)
    mas = xt.smooth('time', 0.2, 'blackman')
    assert_allclose(ma.x, mas.x.swapaxes(1, 2), 1e-10)
    ma_mean = x.mean('case').smooth('time', 0.2, 'blackman')
    assert_allclose(ma.mean('case').x, ma_mean.x)
    # against raw scipy.signal
    window = signal.get_window('blackman', 20, False)
    window /= window.sum()
    window.shape = (1, 20, 1)
    assert_array_equal(ma.x[:, 10:-10], signal.convolve(x.x, window, 'same')[:, 10:-10])
    # mode parameter
    full = signal.convolve(x.x, window, 'full')
    ma = x.smooth('time', 0.2, 'blackman', mode='left')
    assert_array_equal(ma.x[:], full[:, :100])
    ma = x.smooth('time', 0.2, 'blackman', mode='right')
    assert_array_equal(ma.x[:], full[:, 19:])

    # fix_edges: smooth with constant sum
    xs = x.smooth('frequency', window_samples=1, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'))
    xs = x.smooth('frequency', window_samples=2, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=3, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=5, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=4, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)

    # gaussian
    x = get_ndvar(2, frequency=0, sensor=5)
    x.smooth('sensor', 0.1, 'gaussian')
    x = get_ndvar(2, sensor=5)
    x.smooth('sensor', 0.1, 'gaussian')
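def test_smoothing_example():
    # Illustrative sketch (added, not part of the original suite): the two
    # equivalent ways to specify a smoothing window, by duration in the
    # dimension's units or by number of samples. Assumes the 100 Hz sampling
    # rate implied by the 0.2 s == 20 samples equivalence asserted above.
    x = get_ndvar(2)
    a = x.smooth('time', 0.2, 'hamming')
    b = x.smooth('time', window='hamming', window_samples=20)
    assert_dataobj_equal(b, a)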
def test_sample():
    set_log_level('warning', 'mne')
    # import from file: http://stackoverflow.com/a/67692/166700
    SampleExperiment = import_attr(sample_path / 'sample_experiment.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 3, 2)
    root = join(tempdir, 'SampleExperiment')

    e = SampleExperiment(root)
    assert e.get('raw') == '1-40'
    assert e.get('subject') == 'R0000'
    assert e.get('subject', subject='R0002') == 'R0002'

    # events
    e.set('R0001', rej='')
    ds = e.load_selected_events(epoch='target')
    assert ds.n_cases == 39
    ds = e.load_selected_events(epoch='auditory')
    assert ds.n_cases == 20
    ds = e.load_selected_events(epoch='av')
    assert ds.n_cases == 39

    # evoked cache invalidated by change in bads
    e.set('R0001', rej='', epoch='target')
    ds = e.load_evoked()
    assert ds[0, 'evoked'].info['bads'] == []
    e.make_bad_channels(['MEG 0331'])
    ds = e.load_evoked()
    assert ds[0, 'evoked'].info['bads'] == ['MEG 0331']

    e.set(rej='man', model='modality')
    sds = []
    for _ in e:
        e.make_epoch_selection(auto=2.5e-12)
        sds.append(e.load_evoked())
    ds = e.load_evoked('all')
    assert_dataobj_equal(combine(sds), ds)

    # sensor space tests
    megs = [e.load_evoked(cat='auditory')['meg'] for _ in e]
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.rms', baseline=False, make=True)
    meg_rms = combine(meg.rms('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_rms, decimal=21)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.mean', baseline=False, make=True)
    meg_mean = combine(meg.mean('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_mean, decimal=21)
    with pytest.raises(IOError):
        res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False, make=True)
    assert res.p.min() == pytest.approx(.143, abs=.001)
    assert res.difference.max() == pytest.approx(4.47e-13, 1e-15)

    # plot
    e.plot_evoked(1, epoch='target', model='')

    # e._report_subject_info() broke with non-alphabetic subject order
    subjects = e.get_field_values('subject')
    ds = Dataset()
    ds['subject'] = Factor(reversed(subjects))
    ds['n'] = Var(range(3))
    s_table = e._report_subject_info(ds, '')

    # post_baseline_trigger_shift
    # use multiple of tstep to shift by even number of samples
    tstep = 0.008324800548266162
    shift = -7 * tstep

    class Experiment(SampleExperiment):
        epochs = {
            **SampleExperiment.epochs,
            'visual-s': SecondaryEpoch('target', "modality == 'visual'", post_baseline_trigger_shift='shift', post_baseline_trigger_shift_max=0, post_baseline_trigger_shift_min=shift),
        }
        variables = {
            **SampleExperiment.variables,
            'shift': LabelVar('side', {'left': 0, 'right': shift}),
            'shift_t': LabelVar('trigger', {(1, 3): 0, (2, 4): shift}),
        }
    e = Experiment(root)
    # test shift in events
    ds = e.load_events()
    assert_dataobj_equal(ds['shift_t'], ds['shift'], name=False)
    # compare against epochs (baseline correction on epoch level rather than evoked, for smaller numerical error)
    ep = e.load_epochs(baseline=True, epoch='visual', rej='').aggregate('side')
    evs = e.load_evoked(baseline=True, epoch='visual-s', rej='', model='side')
    tstart = ep['meg'].time.tmin - shift
    assert_dataobj_equal(evs[0, 'meg'], ep[0, 'meg'].sub(time=(tstart, None)), decimal=20)
    tstop = ep['meg'].time.tstop + shift
    assert_almost_equal(evs[1, 'meg'].x, ep[1, 'meg'].sub(time=(None, tstop)).x, decimal=20)

    # post_baseline_trigger_shift & multiple epochs with same time stamp
    class Experiment(SampleExperiment):
        epochs = {
            **SampleExperiment.epochs,
            'v1': {'base': 'visual', 'vars': {'shift': 'Var([0.0], repeat=len(side))'}},
            'v2': {'base': 'visual', 'vars': {'shift': 'Var([0.1], repeat=len(side))'}},
            'vc': {'sub_epochs': ('v1', 'v2'), 'post_baseline_trigger_shift': 'shift', 'post_baseline_trigger_shift_max': 0.1, 'post_baseline_trigger_shift_min': 0.0},
        }
        groups = {
            'group0': Group(['R0000']),
            'group1': SubGroup('all', ['R0000']),
        }
        variables = {
            'group': GroupVar(['group0', 'group1']),
            **SampleExperiment.variables,
        }
    e = Experiment(root)
    events = e.load_selected_events(epoch='vc')
    ds = e.load_epochs(baseline=True, epoch='vc')
    v1 = ds.sub("epoch=='v1'", 'meg').sub(time=(0, 0.199))
    v2 = ds.sub("epoch=='v2'", 'meg').sub(time=(-0.1, 0.099))
    assert_almost_equal(v1.x, v2.x, decimal=20)

    # duplicate subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0002', 'R0002')}
    with pytest.raises(DefinitionError):
        BadExperiment(root)

    # non-existing subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0003', 'R0002')}
    with pytest.raises(DefinitionError):
        BadExperiment(root)

    # unsorted subjects
    class Experiment(SampleExperiment):
        groups = {'group': ('R0002', 'R0000', 'R0001')}
    e = Experiment(root)
    assert [s for s in e] == ['R0000', 'R0001', 'R0002']

    # changes
    class Changed(SampleExperiment):
        variables = {
            'event': {(1, 2, 3, 4): 'target', 5: 'smiley', 32: 'button'},
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual'},
        }
        tests = {
            'twostage': TwoStageTest('side_left + modality_a', {'side_left': "side == 'left'", 'modality_a': "modality == 'auditory'"}),
            'novars': TwoStageTest('side + modality'),
        }
    e = Changed(root)

    # changed variable, while a test with model=None is not changed
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }
    e = Changed(root)

    # changed variable, unchanged test with vardef=None
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }
    e = Changed(root)

    # ICA
    # ---
    class Experiment(SampleExperiment):
        raw = {
            'apply-ica': RawApplyICA('tsss', 'ica'),
            **SampleExperiment.raw,
        }
    e = Experiment(root)
    ica_path = e.make_ica(raw='ica')
    e.set(raw='ica1-40', model='')
    e.make_epoch_selection(auto=2e-12, overwrite=True)
    ds1 = e.load_evoked(raw='ica1-40')
    ica = e.load_ica(raw='ica')
    ica.exclude = [0, 1, 2]
    ica.save(ica_path)
    ds2 = e.load_evoked(raw='ica1-40')
    assert not np.allclose(ds1['meg'].x, ds2['meg'].x, atol=1e-20), "ICA change ignored"
    # apply-ICA
    with catch_warnings():
        filterwarnings('ignore', "The measurement information indicates a low-pass frequency", RuntimeWarning)
        ds1 = e.load_evoked(raw='ica', rej='')
        ds2 = e.load_evoked(raw='apply-ica', rej='')
    assert_dataobj_equal(ds2, ds1)

    # rename subject
    # --------------
    src = Path(e.get('raw-dir', subject='R0001'))
    dst = Path(e.get('raw-dir', subject='R0003', match=False))
    shutil.move(src, dst)
    for path in dst.glob('*.fif'):
        shutil.move(path, dst / path.parent / path.name.replace('R0001', 'R0003'))
    # check subject list
    e = SampleExperiment(root)
    assert list(e) == ['R0000', 'R0002', 'R0003']
    # check that cached test got deleted
    assert e.get('raw') == '1-40'
    with pytest.raises(IOError):
        e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False, make=True)
    assert res.df == 2
    assert res.p.min() == pytest.approx(.143, abs=.001)
    assert res.difference.max() == pytest.approx(4.47e-13, 1e-15)

    # remove subject
    # --------------
    shutil.rmtree(dst)
    # check cache
    e = SampleExperiment(root)
    assert list(e) == ['R0000', 'R0002']
    # check that cached test got deleted
    assert e.get('raw') == '1-40'
    with pytest.raises(IOError):
        e.load_test('a>v', 0.05, 0.2, 0.05, samples=20, data='sensor', baseline=False)

    # label_events
    # ------------
    class Experiment(SampleExperiment):
        def label_events(self, ds):
            SampleExperiment.label_events(self, ds)
            ds = ds.sub("event == 'smiley'")
            ds['new_var'] = Var([i + 1 for i in ds['i_start']])
            return ds
    e = Experiment(root)
    events = e.load_events()
    assert_array_equal(events['new_var'], [67402, 75306])
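def test_sample_subclass_example():
    # Illustrative sketch (added, not part of the original suite): the
    # subclass-override pattern used throughout test_sample - pipeline
    # definitions are changed by subclassing, and the experiment re-validates
    # its cache against the new definitions when re-instantiated.
    SampleExperiment = import_attr(sample_path / 'sample_experiment.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 3, 2)
    root = join(tempdir, 'SampleExperiment')

    class Reordered(SampleExperiment):
        # same subjects in a different order; iteration remains sorted
        groups = {'group': ('R0002', 'R0000', 'R0001')}

    e = Reordered(root)
    assert list(e) == ['R0000', 'R0001', 'R0002']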
def test_anova():
    "Test testnd.anova()"
    ds = datasets.get_uts(True, nrm=True)

    testnd.anova('utsnd', 'A*B', ds=ds)
    for samples in (0, 2):
        logging.info("TEST: samples=%r" % samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)
        res = testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)
        assert res._plot_model() == 'A%B'
        asfmtext(res)

    res = testnd.anova('utsnd', 'A*B*rm', match=False, ds=ds, samples=0, pmin=0.05)
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match=False, samples=0, pmin=0.05, 'A': 17 clusters, 'B': 20 clusters, 'A x B': 22 clusters>"
    assert res._plot_model() == 'A%B'
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=2, pmin=0.05)
    assert res.match == 'rm'
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match='rm', samples=2, pmin=0.05, 'A': 17 clusters, p < .001, 'B': 20 clusters, p < .001, 'A x B': 22 clusters, p < .001>"
    assert res._plot_model() == 'A%B'

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert res_._plot_model() == 'A%B'

    # threshold-free
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)
    assert res.match == 'rm'
    assert repr(res) == "<anova 'utsnd', 'A*B*rm', match='rm', samples=10, 'A': p < .001, 'B': p < .001, 'A x B': p < .001>"
    assert 'A clusters' in res.clusters.info
    assert 'B clusters' in res.clusters.info
    assert 'A x B clusters' in res.clusters.info

    # no clusters
    res = testnd.anova('uts', 'B', sub="A=='a1'", ds=ds, samples=5, pmin=0.05, mintime=0.02)
    repr(res)
    assert 'v' in res.clusters
    assert 'p' in res.clusters
    assert res._plot_model() == 'B'

    # all effects with clusters
    res = testnd.anova('uts', 'A*B*rm', match=False, ds=ds, samples=5, pmin=0.05, tstart=0.1, mintime=0.02)
    assert set(res.clusters['effect'].cells) == set(res.effects)

    # some effects with clusters, some without
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05, tstart=0.37, mintime=0.02)
    assert res.match == 'rm'
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_dataobj_equal(res.clusters, res_.clusters)

    # test multi-effect results (with persistence)
    # UTS
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)
    assert res.match == 'rm'
    repr(res)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    resr = pickle.loads(string)
    tf_clusters = resr.find_clusters(pmin=0.05)
    peaks = resr.find_peaks()
    assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))
    assert_dataobj_equal(peaks, res.find_peaks())
    assert tf_clusters.eval("p.min()") == peaks.eval("p.min()")
    unmasked = resr.f[0]
    masked = resr.masked_parameter_map(effect=0, pmin=0.05)
    assert_array_equal(masked.x <= unmasked.x, True)

    # reproducibility
    decimal = 12 if IS_WINDOWS else None  # FIXME: why is Windows sometimes different???
    res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)
    configure(n_workers=0)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters, decimal=decimal)
    configure(n_workers=True)

    # permutation
    eelbrain._stats.permutation._YIELD_ORIGINAL = 1
    samples = 4
    # raw
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)
    for dist in res._cdist:
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, dist.parameter_map.abs().max())
    # TFCE
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    for dist in res._cdist:
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, dist.tfce_map.abs().max())
    # thresholded
    res1 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)
    clusters = res1.find_clusters()
    for dist, effect in zip(res1._cdist, res1.effects):
        effect_idx = clusters.eval("effect == %r" % effect)
        vmax = clusters[effect_idx, 'v'].abs().max()
        assert len(dist.dist) == samples
        assert_array_equal(dist.dist, vmax)
    eelbrain._stats.permutation._YIELD_ORIGINAL = 0

    # 1d TFCE
    configure(n_workers=0)
    res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    configure(n_workers=True)

    # zero variance
    res2 = testnd.anova('utsnd', 'A', ds=ds)
    ds['utsnd'].x[:, 1, 10] = 0.
    zero_var = ds['utsnd'].var('case') == 0
    zv_index = tuple(i[0] for i in zero_var.nonzero())
    res1_zv = testnd.anova('utsnd', 'A*B*rm', ds=ds)
    res2_zv = testnd.anova('utsnd', 'A', ds=ds)
    for res, res_zv in ((res1, res1_zv), (res2, res2_zv)):
        for f, f_zv in zip(res.f, res_zv.f):
            assert_array_equal((f_zv == 0).x, zero_var.x)
            assert f_zv[zv_index] == 0
            f_zv[zv_index] = f[zv_index]
            assert_dataobj_equal(f_zv, f, decimal=decimal)

    # nested random effect
    res = testnd.anova('uts', 'A * B * nrm(A)', ds=ds, samples=10, tstart=.4)
    assert res.match == 'nrm(A)'
    assert [p.min() for p in res.p] == [0.0, 0.6, 0.9]

    # unequal argument length
    with pytest.raises(ValueError):
        testnd.anova('uts', 'A[:-1]', ds=ds)
    with pytest.raises(ValueError):
        testnd.anova('uts[:-1]', 'A * B * nrm(A)', ds=ds)
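def test_anova_example():
    # Illustrative sketch (added, not part of the original suite): the core
    # repeated-measures call pattern tested above - fixed effects A and B with
    # a random match factor and a small permutation distribution.
    ds = datasets.get_uts(True)
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=10)
    assert res.match == 'rm'
    # one permutation p-value map per tested effect (A, B, A x B)
    assert len([p.min() for p in res.p]) == 3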
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert repr(res) == "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), 'rm' (n=15), samples=100, p < .001>"
    difference = res.masked_difference()
    assert difference.x.mask.sum() == 84
    c1 = res.masked_c1()
    assert c1.x.mask.sum() == 84
    assert_array_equal(c1.x.data, res.c1_mean.x)

    # alternate argspec
    res_ = testnd.ttest_rel("uts[A%B == ('a1', 'b1')]", "uts[A%B == ('a0', 'b0')]", ds=ds, samples=100)
    assert repr(res_) == "<ttest_rel 'uts', 'uts' (n=15), samples=100, p < .001>"
    assert_dataobj_equal(res_.t, res.t)

    # alternate argspec 2
    ds1 = Dataset()
    ds1['a1b1'] = ds.eval("uts[A%B == ('a1', 'b1')]")
    ds1['a0b0'] = ds.eval("uts[A%B == ('a0', 'b0')]")
    res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)
    assert_dataobj_equal(res1.t, res.t)
    assert repr(res1) == "<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p < .001>"

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds, samples=0)
    assert res2.p_uncorrected.min() < 0.05
    assert res2.n == res.n

    # reproducibility
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    configure(n_workers=0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    configure(n_workers=True)

    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    configure(n_workers=0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    tgt = res0.compute_probability_map()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)
    assert res.t.x[1, 10] == 0

    # argument length
    with pytest.raises(ValueError):
        testnd.ttest_rel('utsnd', 'A[:-1]', match='rm', ds=ds)
    with pytest.raises(ValueError):
        testnd.ttest_rel('utsnd', 'A', match='rm[:-1]', ds=ds)
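def test_ttest_rel_example():
    # Illustrative sketch (added, not part of the original suite): the two
    # equivalent argument specifications asserted above, restated as a
    # standalone example. The t-map does not depend on the permutation count,
    # so samples=0 suffices here.
    ds = datasets.get_uts(True)
    # factorial spec: y, x, c1, c0, match
    res_a = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=0)
    # direct spec: the two paired samples as separate arguments
    res_b = testnd.ttest_rel("uts[A%B == ('a1', 'b1')]", "uts[A%B == ('a0', 'b0')]", ds=ds, samples=0)
    assert_dataobj_equal(res_b.t, res_a.t)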
def test_ncrf():
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')

    # 1 stimulus
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10, do_post_normalization=False)
    # check residual and explained var
    assert model.explained_var == pytest.approx(0.00641890144769941, rel=0.001)
    assert model.voxelwise_explained_variance.sum() == pytest.approx(0.08261162457414245, rel=0.001)
    assert model.residual == pytest.approx(178.512, 0.001)
    # check scaling
    stim_baseline = stim.mean()
    assert model._stim_baseline[0] == stim_baseline
    assert model._stim_scaling[0] == (stim - stim_baseline).abs().mean()
    assert model.h.norm('time').norm('source').norm('space') == pytest.approx(6.601677e-10, rel=0.001)

    # test persistence
    model_2 = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(model_2.h, model.h)
    assert_dataobj_equal(model_2.h_scaled, model.h_scaled)
    assert model_2.residual == model.residual
    assert model_2.gaussian_fwhm == model.gaussian_fwhm

    # test gaussian fwhm
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444, n_iter=1, n_iterc=1, n_iterf=1, gaussian_fwhm=50.0)
    assert model.gaussian_fwhm == 50.0

    # 2 stimuli, one of them 2-d, normalize='l2'
    diff = stim.diff('time')
    stim2 = concatenate([diff.clip(0), diff.clip(max=0)], Categorial('rep', ['on', 'off']))
    model = fit_ncrf(meg, [stim, stim2], fwd, emptyroom, tstop=[0.2, 0.2], normalize='l2', mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10, do_post_normalization=False)
    # check scaling
    assert model._stim_baseline[0] == stim.mean()
    assert model._stim_scaling[0] == stim.std()
    assert model.h[0].norm('time').norm('source').norm('space') == pytest.approx(7.0088e-10, rel=0.001)

    # cross-validation
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu='auto', n_iter=1, n_iterc=2, n_iterf=2, n_workers=1, do_post_normalization=False)
    assert model.mu == pytest.approx(0.0203, 0.001)
    model.cv_info()
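# Illustrative sketch (added, not part of the original suite): the minimal
# fit_ncrf call pattern exercised above. `load` is this module's test-data
# loader, so the outline below is hypothetical and not runnable without those
# fixtures; real use would substitute actual MEG and stimulus NDVars, a
# forward solution, and empty-room data.
#
#     meg = load('meg').sub(time=(0, 5))
#     stim = load('stim').sub(time=(0, 5))
#     model = fit_ncrf(meg, stim, load('fwd_sol'), load('emptyroom'),
#                      tstop=0.2, normalize='l1', mu='auto')
#     model.cv_info()  # report the cross-validated choice of mu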