def test_pickle():
    ds = datasets.get_uts()

    ds_2 = load.unpickle(file_path('uts-py2.pickle'))
    assert_dataobj_equal(ds_2, ds)

    ds_3 = load.unpickle(file_path('uts-py3.pickle'))
    assert_dataobj_equal(ds_3, ds)

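# The persistence checks in the tests below all repeat the same pattern:
# pickle the result, unpickle it, and compare repr() plus key attributes.
# A minimal helper sketch of that pattern (the name is hypothetical, not
# part of the suite; relies on the module-level pickle / assert_equal
# imports used throughout this file):
def assert_pickle_roundtrip(obj):
    string = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    obj_ = pickle.loads(string)
    assert_equal(repr(obj_), repr(obj))
    return obj_
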
def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar('uts', ds=ds)
    ok_('time' in lds)
    assert_is_instance(lds['time'], Var)
    assert_array_equal(np.unique(lds['time'].x), ds['uts'].time)

    # no ds
    lds2 = table.melt_ndvar(ds['uts'])
    assert_dataobj_equal(lds2['uts'], lds['uts'])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))", ds=ds, varname='summary')
    eq_(set(lds['sensor'].cells), set(ds['utsnd'].sensor.names))

    # NDVar out
    lds = table.melt_ndvar("utsnd", 'sensor', ds=ds)
    ok_('utsnd' in lds)
    assert_is_instance(lds['utsnd'], NDVar)
    assert_dataobj_equal(lds[:ds.n_cases, 'utsnd'], ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    assert_raises(ValueError, table.melt_ndvar, 'utsnd', ds=ds)

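# melt_ndvar() converts an NDVar into a long-format Dataset with one row
# per element of the melted dimension, per case. A usage sketch (the
# shape relation follows from the time-value check above):
# >>> ds = datasets.get_uts()
# >>> lds = table.melt_ndvar('uts', ds=ds)
# >>> lds.n_cases == ds.n_cases * len(ds['uts'].time)
# True
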
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    eq_(repr(res), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_less(res.p_uncorrected.min(), 0.05)

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    eq_(repr(res_), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    eq_(res._cdist.n_clusters, 10)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    assert_raises(ZeroVariance, testnd.ttest_ind, 'utsnd', 'A', ds=ds)

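# Background for the zero-variance check above: the t statistic divides
# by a standard error, and setting one (sensor, time) sample to 0. for
# every case makes it constant within both groups, so the denominator is
# exactly zero and ttest_ind raises ZeroVariance instead of producing
# NaN. Quick numpy sanity check (illustrative, not part of the suite):
# >>> np.zeros(30).std(ddof=1)
# 0.0
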
def test_corr():
    "Test testnd.corr()"
    ds = datasets.get_uts(True)

    # add correlation
    Y = ds['Y']
    utsnd = ds['utsnd']
    utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]

    res = testnd.corr('utsnd', 'Y', ds=ds)
    repr(res)
    for s, t in product('01234', (0.1, 0.2, 0.35)):
        target = test.Correlation(utsnd.sub(sensor=s, time=t), Y).r
        assert_almost_equal(res.r.sub(sensor=s, time=t), target, 10)
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, pmin=0.05)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, tfce=True)
    repr(res)

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    assert_dataobj_equal(res.p, res_.p)

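# Note on the setup above: adding Y into a (sensor, time) window of
# utsnd injects a linear dependence on Y at exactly those points, so the
# r-map has a known region of true correlation for the test to recover
# and compare against per-point test.Correlation results.
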
def test_t_contrast():
    ds = datasets.get_uts()

    # simple contrast
    res = testnd.t_contrast_rel('uts', 'A', 'a1>a0', 'rm', ds=ds, samples=10, pmin=0.05)
    repr(res)
    res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_array_equal(res.t.x, res_.t.x)
    assert_in('samples', repr(res))

    # complex contrast
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a0|b0>a1|b0, a0|b1>a1|b1)', 'rm',
                                ds=ds, samples=10, pmin=0.05)
    res_b0 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b0'), ('a1', 'b0'), 'rm', ds=ds)
    res_b1 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b1'), ('a1', 'b1'), 'rm', ds=ds)
    assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p, res_.p)

    # contrast with "*"
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm',
                                ds=ds, tail=1)

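# The contrast mini-language maps onto element-wise operations on t-maps:
# 'a1>a0' is a single related-samples t-map, and 'min(x, y)' is the
# point-wise minimum of two such maps, which is what the np.min assertion
# above verifies against two explicit ttest_rel results.
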
def test_samples_sessions():
    set_log_level('warning', 'mne')
    SampleExperiment = import_attr(sample_path / 'sample_experiment_sessions.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 2, 1, 2)
    root = join(tempdir, 'SampleExperiment')
    e = SampleExperiment(root)

    # bad channels
    e.make_bad_channels('0111')
    eq_(e.load_bad_channels(), ['MEG 0111'])
    eq_(e.load_bad_channels(session='sample2'), [])
    e.show_bad_channels()
    e.merge_bad_channels()
    eq_(e.load_bad_channels(session='sample2'), ['MEG 0111'])
    e.show_bad_channels()

    # rejection
    for _ in e:
        for epoch in ('target1', 'target2'):
            e.set(epoch=epoch)
            e.make_rej(auto=2e-12)
    ds = e.load_evoked('R0000', epoch='target2')
    e.set(session='sample1')
    ds2 = e.load_evoked('R0000')
    assert_dataobj_equal(ds2, ds)

    # super-epoch
    ds1 = e.load_epochs(epoch='target1')
    ds2 = e.load_epochs(epoch='target2')
    ds_super = e.load_epochs(epoch='super')
    assert_dataobj_equal(ds_super['meg'], combine((ds1['meg'], ds2['meg'])))

def test_result():
    "Test boosting results"
    ds = datasets._get_continuous()

    # convolve function
    y = convolve([ds['h1'], ds['h2']], [ds['x1'], ds['x2']])
    y.name = 'y'
    assert_dataobj_equal(y, ds['y'])

    # test prediction with res.h and res.h_scaled
    res = boosting(ds['y'], ds['x1'], 0, 1)
    y1 = convolve(res.h_scaled, ds['x1'])
    x_scaled = ds['x1'] / res.x_scale
    y2 = convolve(res.h, x_scaled)
    y2 *= res.y_scale
    y2 += y1.mean() - y2.mean()  # mean can't be reconstructed
    assert_dataobj_equal(y1, y2, decimal=12)

    # test NaN checks (modifies data)
    ds['x2'].x[1, 50] = np.nan
    assert_raises(ValueError, boosting, ds['y'], ds['x2'], 0, .5)
    assert_raises(ValueError, boosting, ds['y'], ds['x2'], 0, .5, False)
    ds['x2'].x[1, :] = 1
    assert_raises(ValueError, boosting, ds['y'], ds['x2'], 0, .5)
    ds['y'].x[50] = np.nan
    assert_raises(ValueError, boosting, ds['y'], ds['x1'], 0, .5)
    assert_raises(ValueError, boosting, ds['y'], ds['x1'], 0, .5, False)

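# Note on the equivalence tested above: boosting() fits on rescaled data
# (see res.x_scale and res.y_scale), so res.h is in scaled units while
# res.h_scaled folds the scale factors back in. The two prediction
# routes therefore match up to the mean, which rescaling discards and
# which the test adds back manually before comparing.
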
def test_ndvar_index(x, dimname, index, a_index, index_repr=True):
    "Helper function for test_ndvar_indexing"
    ax = x.get_axis(dimname)
    index_prefix = (full_slice,) * ax
    if dimname != 'case':
        dim = x.get_dim(dimname)
        assert_equal(dim.dimindex(index), a_index)
        if index_repr is not False:
            if index_repr is True:
                index_repr = index
            eq_(dim._index_repr(a_index), index_repr)
    x_array = x.x[index_prefix + (a_index,)]
    x1 = x.sub(**{dimname: index})
    x2 = x[index_prefix + (index,)]
    assert_array_equal(x1.x, x_array)
    assert_dataobj_equal(x2, x1)

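# Usage sketch for the helper above (argument values are illustrative
# assumptions, not taken from the test module; full_slice is presumably
# a module-level alias for slice(None), given how it is used as an index
# prefix). The helper checks that a user-level index resolves to the
# expected array index and that x.sub(dim=index) and x[..., index] agree:
# >>> x = datasets.get_uts(utsnd=True)['utsnd']
# >>> test_ndvar_index(x, 'sensor', '0', 0)
# >>> test_ndvar_index(x, 'case', slice(1, 3), slice(1, 3), False)
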
def test_dataset_indexing():
    """Test Dataset indexing"""
    ds = datasets.get_uv()

    # indexing values
    eq_(ds['A', 1], ds['A'][1])
    eq_(ds[1, 'A'], ds['A'][1])

    # indexing variables
    assert_dataobj_equal(ds[:, 'A'], ds['A'])
    assert_dataobj_equal(ds['A', :], ds['A'])
    assert_dataobj_equal(ds[:10, 'A'], ds['A'][:10])
    assert_dataobj_equal(ds['A', :10], ds['A'][:10])

    # new Dataset through indexing
    ds2 = Dataset()
    ds2['A'] = ds['A']
    assert_dataset_equal(ds[('A',)], ds2)
    ds2['B'] = ds['B']
    assert_dataset_equal(ds['A', 'B'], ds2)
    assert_dataset_equal(ds[('A', 'B'), :10], ds2[:10])
    assert_dataset_equal(ds[:10, ('A', 'B')], ds2[:10])

    # assigning value
    ds[2, 'A'] = 'hello'
    eq_(ds[2, 'A'], 'hello')
    ds['A', 2] = 'not_hello'
    eq_(ds[2, 'A'], 'not_hello')

    # assigning new factor
    ds['C', :] = 'c'
    ok_(np.all(ds.eval("C == 'c'")))

    # assigning new Var
    ds['D1', :] = 5.
    ds[:, 'D2'] = 5.
    assert_array_equal(ds['D1'], 5)
    assert_array_equal(ds['D2'], 5)

    # test illegal names
    f = Factor('aaabbb')
    assert_raises(ValueError, ds.__setitem__, '%dsa', f)
    assert_raises(ValueError, ds.__setitem__, '432', f)
    assert_raises(ValueError, ds.__setitem__, ('%dsa', slice(None)), 'value')
    assert_raises(ValueError, ds.__setitem__, (slice(None), '%dsa'), 'value')
    assert_raises(ValueError, ds.__setitem__, ('432', slice(None)), 4.)
    assert_raises(ValueError, ds.__setitem__, (slice(None), '432'), 4.)

    # deleting items
    del ds['A']
    ok_('A' not in ds)
    assert_raises(KeyError, ds.__getitem__, 'A')
    del ds['B', 'rm']
    ok_('B' not in ds and 'rm' not in ds)

def test_interaction():
    "Test Interaction"
    ds = datasets.get_uv()
    A = ds['A']
    B = ds['B']
    i = A % B

    # eq for sequence
    assert_array_equal(i == A % B, True)
    assert_array_equal(i == B % A, False)
    assert_array_equal(i == A, False)
    assert_array_equal(i == ds['fltvar'], False)
    assert_array_equal(ds.eval("A%B") == Factor(ds['A']) % B, True)

    # eq for element
    for a, b in product(A.cells, B.cells):
        assert_array_equal(i == (a, b), np.logical_and(A == a, B == b))

    # Interaction.as_factor()
    a = Factor('aabb')
    i = a % Factor('cdcd')
    assert_dataobj_equal(i.as_factor(), Factor(['a c', 'a d', 'b c', 'b d']))
    i = a % Factor(['c', '', 'c', ''])
    assert_dataobj_equal(i.as_factor(), Factor(['a c', 'a', 'b c', 'b']))

    # pickling
    ip = pickle.loads(pickle.dumps(i))
    assert_dataobj_equal(ip, i)

def test_test_experiment():
    "Test event labeling with the EventExperiment subclass of MneExperiment"
    e = EventExperiment()

    # test defaults
    eq_(e.get('session'), 'cheese')
    eq_(e.get('model'), 'name')

    # test event labeling
    ds = e.label_events(gen_triggers())
    name = Factor([e.variables['name'][t] for t in TRIGGERS], name='name')
    assert_dataobj_equal(ds['name'], name)
    tgt = ds['trigger'].as_factor(e.variables['backorder'], 'backorder')
    assert_dataobj_equal(ds['backorder'], tgt)
    tgt = ds['trigger'].as_factor(e.variables['taste'], 'taste')
    assert_dataobj_equal(ds['taste'], tgt)
    assert_array_equal(ds['i_start'], I_START)
    assert_array_equal(ds['subject'] == SUBJECT, True)

    # tests disabled (trigger-shift applied in load_events):
    # ---
    # assert_equal(ds['i_start'], I_START + round(0.03 * SAMPLINGRATE))
    # # test without trigger shift
    # e.trigger_shift = 0
    # ds = e.label_events(gen_triggers())
    # assert_equal(ds['i_start'], I_START)
    # # trigger shift dict
    # e2 = EventExperimentTriggerShiftDict('', False)
    # ds = e2.label_events(gen_triggers())
    # assert_equal(ds['i_start'], I_START + round(0.04 * SAMPLINGRATE))

    # epochs
    eq_(e._epochs['cheese'].tmin, -0.2)
    eq_(e._epochs['cheese-leicester'].tmin, -0.1)
    eq_(e._epochs['cheese-tilsit'].tmin, -0.2)

def test_var():
    "Test Var objects"
    base = Factor('aabbcde')

    # initialization
    x = np.arange(4)
    y = Var(x)
    assert_array_equal(y, x)
    y = Var(x, repeat=2)
    assert_array_equal(y, x.repeat(2))
    y = Var(x, repeat=x)
    assert_array_equal(y, x.repeat(x))
    y = Var.from_dict(base, {'a': 5, 'e': 8}, default=0)
    assert_array_equal(y.x, [5, 5, 0, 0, 0, 0, 8])
    assert_raises(TypeError, Var, x, info=1)

    # basic operations
    info = {'a': 1}
    v = Var([1., 2., 3., -4.], 'v', info=info)
    c = 2
    v2 = Var([2., 2., 3., 3.], 'w', info=info)
    eq_(v.info, info)
    for op, iop, desc in OPERATORS:
        target = op(v.x, c)
        vtarget = op(v.x, v2.x)
        # op
        if desc == '+':
            w = v.copy()
            w.x = iop(w.x, c)
        else:
            w = op(v, c)
        eq_(w.info, {'a': 1, 'longname': 'v %s %s' % (desc, c)})
        assert_array_equal(w, target)
        # with Var
        w = op(v, v2)
        eq_(w.info, {'a': 1, 'longname': 'v %s w' % desc})
        assert_array_equal(w, vtarget)
        # i-op
        w = v.copy()
        w = iop(w, c)
        assert_array_equal(w, target)
        # i-op with Var
        w = v.copy()
        w = iop(w, v2)
        assert_array_equal(w, vtarget)

    # methods
    w = v.abs()
    eq_(w.info, {'a': 1, 'longname': 'abs(v)'})
    assert_array_equal(w, np.abs(v.x))
    x = w.log()
    eq_(x.info, {'a': 1, 'longname': 'log(abs(v))'})
    assert_array_equal(x, np.log(w.x))

    # assignment
    tgt1 = np.arange(10)
    tgt2 = np.tile(np.arange(5), 2)
    v = Var(np.arange(10))
    v[v > 4] = np.arange(5)
    assert_array_equal(v, tgt2)
    v[5:] = np.arange(5, 10)
    assert_array_equal(v, tgt1)
    v = Var(np.arange(10))
    v[v > 4] = Var(np.arange(5))
    assert_array_equal(v, tgt2)
    v[5:] = Var(np.arange(5, 10))
    assert_array_equal(v, tgt1)

    # .count()
    v = Var([1., 2., 1.11, 2., 1.11, 4.])
    assert_array_equal(v.count(), [0, 0, 0, 1, 1, 0])

    # .split()
    y = Var(np.arange(16))
    for i in xrange(1, 9):
        split = y.split(i)
        eq_(len(split.cells), i)

    # .as_factor()
    v = Var(np.arange(4))
    assert_dataobj_equal(v.as_factor(), Factor('0123'))
    assert_dataobj_equal(v.as_factor({0: 'a'}), Factor(['a', '', '', '']))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', (2, 3): 'b'}), Factor('aabb'))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', 2: 'b', 'default': 'c'}), Factor('aabc'))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', (2, 'default'): 'b'}), Factor('aabb'))

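# OPERATORS is a module-level constant not shown in this excerpt; a
# plausible sketch of its structure, inferred from how the loop above
# unpacks it (binary operator, in-place operator, symbol used for the
# 'longname' info entry). This is an assumption, not the module's actual
# definition:
import operator
OPERATORS = [
    (operator.add, operator.iadd, '+'),
    (operator.sub, operator.isub, '-'),
    (operator.mul, operator.imul, '*'),
]
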
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    eq_(repr(res),
        "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), "
        "'rm' (n=15), samples=100, p=.000>")

    # alternate argspec
    ds1 = Dataset()
    ds1['a1b1'] = ds.eval("uts[A%B == ('a1', 'b1')]")
    ds1['a0b0'] = ds.eval("uts[A%B == ('a0', 'b0')]")
    res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)
    assert_dataobj_equal(res1.t, res.t)
    eq_(repr(res1), "<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p=.000>")

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)

    # reproducibility
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    configure(n_workers=0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm', ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    configure(n_workers=True)

    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    configure(n_workers=0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1, samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    tgt = res0.compute_probability_map()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True, samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)
    eq_(res.t.x[1, 10], 0)

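# The configure(n_workers=...) pairs above run each test once serially
# (n_workers=0) and once with multiprocessing (n_workers=True) to verify
# that both code paths yield identical cluster statistics. A hypothetical
# context-manager sketch that would make the toggle self-restoring (not
# part of the suite):
from contextlib import contextmanager

@contextmanager
def serial():
    configure(n_workers=0)
    try:
        yield
    finally:
        configure(n_workers=True)
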
def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,
                           mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,
                             samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    time = UTS(-0.1, 0.1, 4)
    scalar = Scalar('scalar', range(10), 'unit')
    dims = ('case', time, sensor, scalar)
    np.random.seed(0)
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))

    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = find_peaks(x, cdist._connectivity)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    # testnd permutation result
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    assert_allclose(np.sort(res._cdist.dist),
                    [77.5852307, 119.1976153, 217.6270428])

    # parc with TFCE on unconnected dimension
    configure(False)
    x = np.random.normal(0, 1, (10, 5, 2, 4))
    time = UTS(-0.1, 0.1, 5)
    categorial = Categorial('categorial', ('a', 'b'))
    y = NDVar(x, ('case', time, categorial, sensor))
    y0 = NDVar(x[:, :, 0], ('case', time, sensor))
    y1 = NDVar(x[:, :, 1], ('case', time, sensor))
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')
    res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)
    res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)

    # cdist
    eq_(res._cdist.shape, (4, 2, 5))

    # T-maps don't depend on connectivity
    assert_array_equal(res.t.x[:, 0], res0.t.x)
    assert_array_equal(res.t.x[:, 1], res1.t.x)
    assert_array_equal(res_parc.t.x[:, 0], res0.t.x)
    assert_array_equal(res_parc.t.x[:, 1], res1.t.x)

    # TFCE-maps should always be the same because they're unconnected
    assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)

    # Probability-maps should depend on what is taken into account
    p_a = res0.compute_probability_map().x
    p_b = res1.compute_probability_map().x
    assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)
    assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)
    p_parc = res_parc.compute_probability_map()
    assert_array_equal(p_parc.x, res.compute_probability_map().x)
    ok_(np.all(p_parc.sub(categorial='a').x >= p_a))
    ok_(np.all(p_parc.sub(categorial='b').x >= p_b))
    configure(True)

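# Why p_parc >= p_a / p_b above: without parc, each permutation's
# maximum statistic is taken over both 'categorial' levels combined, so
# the pooled null distribution dominates the per-level distributions and
# p-values can only grow. parc='categorial' instead accumulates a
# separate distribution per level, recovering the per-level p-maps
# exactly.
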
def test_ndvar():
    "Test the NDVar class"
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']

    # meaningful slicing
    assert_raises(KeyError, x.sub, sensor='5')
    assert_equal(x.sub(sensor='4'), x.x[:, 4])
    assert_equal(x.sub(sensor=['4', '3', '2']), x.x[:, [4, 3, 2]])
    assert_equal(x.sub(sensor=['4']), x.x[:, [4]])
    assert_equal(x.sub(case=1, sensor='4'), x.x[1, 4])

    # setup indices
    s_case = slice(10, 13)
    s_sensor = slice('2', '4')
    s_time = slice(0.1, 0.2)
    b_case = np.bincount([10, 11, 12], minlength=len(x)).astype(bool)
    b_sensor = np.array([False, False, True, True, False])
    b_time = np.bincount(range(30, 40), minlength=len(x.time)).astype(bool)
    a_case = np.arange(10, 13)
    a_sensor = ['2', '3']
    a_time = np.arange(0.1, 0.2, 0.01)

    # slicing with different index kinds
    tgt = x.x[s_case, 2:4, 30:40]
    eq_(tgt.shape, (3, 2, 10))
    # single
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=b_time), tgt)
    # bool & slice
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=b_time), tgt)
    # bool & array
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=b_time), tgt)
    # slice & array
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=s_time), tgt)
    # all three
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=a_time), tgt)

    # norm
    y = x / x.norm('sensor')
    assert_allclose(y.norm('sensor'), 1.)
    y = ds['uts'].mean('case').norm('time')
    assert_is_instance(y, float)

    # Var
    v_case = Var(b_case)
    assert_equal(x.sub(case=v_case, sensor=b_sensor, time=a_time), tgt)

    # univariate result
    assert_dataobj_equal(x.sub(sensor='2', time=0.1), Var(x.x[:, 2, 30], x.name))
    eq_(x.sub(case=0, sensor='2', time=0.1), x.x[0, 2, 30])

    # baseline correction
    x_bl = x - x.summary(time=(None, 0))
    # assert that the baseline is 0
    bl = x_bl.summary('case', 'sensor', time=(None, 0))
    ok_(abs(bl) < 1e-10, "Baseline correction")

    # NDVar as index
    sens_mean = x.mean(('case', 'time'))
    idx = sens_mean > 0
    pos = sens_mean[idx]
    assert_array_equal(pos.x > 0, True)

    # NDVar as index along one dimension
    x_tc = x.sub(sensor='1')
    x_time = NDVar(x_tc.time.times >= 0.3, dims=(x_tc.time,))
    assert_dataobj_equal(x_tc[x_time], x_tc.sub(time=(0.3, None)))

    # out of range index
    assert_raises(ValueError, x.sub, time=(0.1, 0.81))
    assert_raises(IndexError, x.sub, time=(-0.25, 0.1))

    # iteration
    for i, xi in enumerate(x):
        assert_dataobj_equal(xi, x[i])
        if i > 4:
            break

def test_sample():
    set_log_level('warning', 'mne')
    # import from file: http://stackoverflow.com/a/67692/166700
    SampleExperiment = import_attr(sample_path / 'sample_experiment.py', 'SampleExperiment')
    tempdir = TempDir()
    datasets.setup_samples_experiment(tempdir, 3, 2)
    root = join(tempdir, 'SampleExperiment')

    e = SampleExperiment(root)
    eq_(e.get('subject'), 'R0000')
    eq_(e.get('subject', subject='R0002'), 'R0002')

    # events
    e.set('R0001', rej='')
    ds = e.load_selected_events(epoch='target')
    assert ds.n_cases == 39
    ds = e.load_selected_events(epoch='auditory')
    assert ds.n_cases == 20
    ds = e.load_selected_events(epoch='av')
    assert ds.n_cases == 39

    # evoked cache invalidated by change in bads
    e.set('R0001', rej='', epoch='target')
    ds = e.load_evoked()
    eq_(ds[0, 'evoked'].info['bads'], [])
    e.make_bad_channels(['MEG 0331'])
    ds = e.load_evoked()
    eq_(ds[0, 'evoked'].info['bads'], ['MEG 0331'])

    e.set(rej='man', model='modality')
    sds = []
    for _ in e:
        e.make_rej(auto=2.5e-12)
        sds.append(e.load_evoked())
    ds = e.load_evoked('all')
    assert_dataobj_equal(combine(sds), ds)

    # test with data parameter
    megs = [e.load_evoked(cat='auditory')['meg'] for _ in e]
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.rms',
                      sns_baseline=False, make=True)
    meg_rms = combine(meg.rms('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_rms, decimal=21)
    res = e.load_test('a>v', 0.05, 0.2, 0.05, samples=100, data='sensor.mean',
                      sns_baseline=False, make=True)
    meg_mean = combine(meg.mean('sensor') for meg in megs).mean('case', name='auditory')
    assert_dataobj_equal(res.c1_mean, meg_mean, decimal=21)

    # e._report_subject_info() broke with non-alphabetic subject order
    subjects = e.get_field_values('subject')
    ds = Dataset()
    ds['subject'] = Factor(reversed(subjects))
    ds['n'] = Var(range(3))
    s_table = e._report_subject_info(ds, '')

    # test multiple epochs with same time stamp
    class Experiment(SampleExperiment):
        epochs = SampleExperiment.epochs.copy()
    Experiment.epochs['v1'] = {'base': 'visual', 'vars': {'shift': 'Var([0.0], repeat=len(side))'}}
    Experiment.epochs['v2'] = {'base': 'visual', 'vars': {'shift': 'Var([0.1], repeat=len(side))'}}
    Experiment.epochs['vc'] = {'sub_epochs': ('v1', 'v2'),
                               'post_baseline_trigger_shift': 'shift',
                               'post_baseline_trigger_shift_max': 0.1,
                               'post_baseline_trigger_shift_min': 0.0}
    e = Experiment(root)
    ds = e.load_epochs(baseline=True, epoch='vc')
    v1 = ds.sub("epoch=='v1'", 'meg').sub(time=(0, 0.199))
    v2 = ds.sub("epoch=='v2'", 'meg').sub(time=(-0.1, 0.099))
    assert_dataobj_equal(v1, v2, decimal=20)

    # duplicate subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0002', 'R0002')}
    assert_raises(DefinitionError, BadExperiment, root)

    # non-existing subject
    class BadExperiment(SampleExperiment):
        groups = {'group': ('R0001', 'R0003', 'R0002')}
    assert_raises(DefinitionError, BadExperiment, root)

    # unsorted subjects
    class Experiment(SampleExperiment):
        groups = {'group': ('R0002', 'R0000', 'R0001')}
    e = Experiment(root)
    eq_([s for s in e], ['R0000', 'R0001', 'R0002'])

    # changes
    class Changed(SampleExperiment):
        variables = {
            'event': {(1, 2, 3, 4): 'target', 5: 'smiley', 32: 'button'},
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual'},
        }
        tests = {
            'twostage': {
                'kind': 'two-stage',
                'stage 1': 'side_left + modality_a',
                'vars': {
                    'side_left': "side == 'left'",
                    'modality_a': "modality == 'auditory'",
                },
            },
            'novars': {
                'kind': 'two-stage',
                'stage 1': 'side + modality',
            },
        }
    e = Changed(root)

    # changed variable, while a test with model=None is not changed
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }
    e = Changed(root)

    # changed variable, unchanged test with vardef=None
    class Changed(Changed):
        variables = {
            'side': {(1, 3): 'left', (2, 4): 'right_changed'},
            'modality': {(1, 2): 'auditory', (3, 4): 'visual_changed'},
        }
    e = Changed(root)

    # ICA
    # ---
    e = SampleExperiment(root)
    ica_path = e.make_ica(raw='ica')
    e.set(raw='ica1-40', model='')
    e.make_rej(auto=2e-12, overwrite=True)
    ds1 = e.load_evoked(raw='ica1-40')
    ica = e.load_ica(raw='ica')
    ica.exclude = [0, 1, 2]
    ica.save(ica_path)
    ds2 = e.load_evoked(raw='ica1-40')
    assert not np.allclose(ds1['meg'].x, ds2['meg'].x, atol=1e-20), "ICA change ignored"

def test_factor():
    "Test basic Factor functionality"
    # initializing
    assert_array_equal(Factor('ab'), ['a', 'b'])
    assert_array_equal(Factor('ab', repeat=2), ['a', 'a', 'b', 'b'])
    assert_array_equal(Factor('ab', repeat=np.array([2, 1])), ['a', 'a', 'b'])
    empty_factor = Factor([])
    eq_(len(empty_factor), 0)
    assert_dataobj_equal(Factor(np.empty(0)), empty_factor)
    # from Factor
    f = Factor('aabbcc')
    assert_array_equal(Factor(f), f)
    assert_array_equal(Factor(f, labels={'a': 'b'}), Factor('bbbbcc'))

    # removing a cell
    f = Factor('aabbcc')
    eq_(f.cells, ('a', 'b', 'c'))
    eq_(f.n_cells, 3)
    f[f == 'c'] = 'a'
    eq_(f.cells, ('a', 'b'))
    eq_(f.n_cells, 2)

    # cell order
    a = np.tile(np.arange(3), 3)
    # alphabetical
    f = Factor(a, labels={0: 'c', 1: 'b', 2: 'a'})
    eq_(f.cells, ('a', 'b', 'c'))
    # ordered
    f = Factor(a, labels=((0, 'c'), (1, 'b'), (2, 'a')))
    eq_(f.cells, ('c', 'b', 'a'))
    eq_(f[:2].cells, ('c', 'b'))
    f[f == 'b'] = 'c'
    eq_(f.cells, ('c', 'a'))

    # label length
    lens = [2, 5, 32, 2, 32, 524]
    f = Factor(['a' * l for l in lens], 'f')
    fl = f.label_length()
    assert_array_equal(fl, lens)
    eq_(fl.info['longname'], 'f.label_length()')
    lens2 = [3, 5, 32, 2, 32, 523]
    f2 = Factor(['b' * l for l in lens2], 'f2')
    assert_array_equal(fl - f2.label_length(), [a - b for a, b in zip(lens, lens2)])

    # equality
    f = Factor('aabbcc')
    assert_equal(f == Factor('aabbcc'), True)
    assert_equal(f == Factor('bbccaa'), False)
    assert_equal(f == Factor('aabxxx'), (True, True, True, False, False, False))
    assert_equal(f == Var(np.ones(6)), False)

    # Factor.as_var()
    assert_array_equal(f.as_var(dict(zip('abc', range(3)))), [0, 0, 1, 1, 2, 2])
    assert_array_equal(f.as_var({'a': 1}, 2), [1, 1, 2, 2, 2, 2])
    assert_raises(KeyError, f.as_var, {'a': 1})

    # Factor.floodfill()
    f = Factor([' ', ' ', '1', '2', ' ', ' ', '3', ' ', ' ', '2', ' ', ' ', '1'])
    regions =  [ 1,   1,   1,   2,   2,   2,   3,   3,   3,   2,   2,   1,   1]
    regions2 = [ 1,   1,   1,   2,   2,   3,   3,   2,   2,   2,   2,   1,   1]
    regions3 = [ 1,   1,   1,   1,   1,   1,   1,   1,   2,   2,   2,   2,   2]
    target3 =  ['1', '1', '1', '2', '2', '2', '3', '3', '2', '2', '2', '2', '1']
    target_p = [' ', ' ', '1', '2', '2', '2', '3', '3', '3', '2', '2', '2', '1']
    assert_array_equal(f.floodfill(regions, ' '), Var(regions).as_factor())
    assert_array_equal(f.floodfill(regions2, ' '), Var(regions2).as_factor())
    assert_array_equal(f.floodfill(regions3, ' '), target3)
    assert_array_equal(f.floodfill('previous', ' '), target_p)
    f = Factor(['', '', 'a', '', 'e', 'r', ''])
    assert_array_equal(f.floodfill([1, 1, 1, 11, 11, 11, 11]), Factor('aaaeerr'))

def test_model():
    "Test Model class"
    a = Factor('ab', repeat=3, name='a')
    b = Factor('ab', tile=3, name='b')
    u = Var([1, 1, 1, -1, -1, -1], 'u')
    v = Var([1., 2., 3., 4., 5., 6.], 'v')
    w = Var([1., 0., 0., 1., 1., 0.], 'w')

    # model repr
    m = a * b + v
    eq_(repr(m), "a + b + a % b + v")
    lines = ("intercept   a   b   a x b   v",
             "-----------------------------",
             "1           1   1   1       1",
             "1           1   0   0       2",
             "1           1   1   1       3",
             "1           0   0   0       4",
             "1           0   1   0       5",
             "1           0   0   0       6")
    eq_(str(m), '\n'.join(lines))
    eq_(str(m.head(2)), '\n'.join(lines[:4]))
    eq_(str(m.tail(2)), '\n'.join(lines[:2] + lines[-2:]))

    # model without explicit names
    x1 = Factor('ab', repeat=2)
    x2 = Factor('ab', tile=2)
    m = x1 * x2
    eq_(repr(m), "<?> + <?> + <?> % <?>")

    # catch explicit intercept
    intercept = Factor('i', repeat=4, name='intercept')
    assert_raises(ValueError, a.__mul__, intercept)

    # different var/factor combinations
    eq_(a * b, a + b + a % b)
    eq_(a * v, a + v + a % v)
    eq_(a * (v + w), a + v + w + a % v + a % w)

    # parametrization
    m = v + w + v * w
    p = m._parametrize('dummy')
    eq_(p.column_names, ['intercept', 'v', 'w', 'v * w'])
    assert_array_equal(p.x[:, p.terms['intercept']], 1)
    assert_array_equal(p.x[:, p.terms['v']], v.x[:, None])
    assert_array_equal(p.x[:, p.terms['w']], w.x[:, None])
    assert_array_equal(p.x[:, p.terms['v * w']], (v * w).x[:, None])

    # persistence
    mp = pickle.loads(pickle.dumps(m, pickle.HIGHEST_PROTOCOL))
    assert_array_equal(m.full, mp.full)

    # nested Vars
    m = (v + w) * u
    assert_dataobj_equal(m.effects[2], u)
    assert_dataobj_equal(m.effects[3], v * u)
    assert_dataobj_equal(m.effects[4], w * u)
    m = u * (v + w)
    assert_dataobj_equal(m.effects[0], u)
    assert_dataobj_equal(m.effects[3], u * v)
    assert_dataobj_equal(m.effects[4], u * w)
    m = (v + w) % u
    assert_dataobj_equal(m.effects[0], v * u)
    assert_dataobj_equal(m.effects[1], w * u)
    m = u % (v + w)
    assert_dataobj_equal(m.effects[0], u * v)
    assert_dataobj_equal(m.effects[1], u * w)

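# Model algebra cheat-sheet, as exercised above: '+' concatenates
# effects, '%' forms only the interaction, and '*' is the full factorial
# shorthand, i.e. a * b == a + b + a % b. A dummy-coded 2 x 2 factorial
# design thus parametrizes to four columns (intercept, a, b, a x b),
# matching the table in the repr test.
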
def test_anova():
    "Test testnd.anova()"
    ds = datasets.get_uts(True)

    testnd.anova('utsnd', 'A*B', ds=ds)
    for samples in (0, 2):
        logging.info("TEST: samples=%r" % samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)

    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=0, pmin=0.05)
    eq_(repr(res),
        "<anova 'utsnd', 'A*B*rm', samples=0, pmin=0.05, "
        "'A': 17 clusters, 'B': 20 clusters, 'A x B': 22 clusters>")
    res = testnd.anova('utsnd', 'A*B*rm', match='rm', ds=ds, samples=2,
                       pmin=0.05)
    eq_(repr(res),
        "<anova 'utsnd', 'A*B*rm', match='rm', samples=2, pmin=0.05, "
        "'A': 17 clusters, p >= 0.000, 'B': 20 clusters, p >= 0.000, "
        "'A x B': 22 clusters, p >= 0.000>")

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))

    # threshold-free
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)
    repr(res)
    assert_in('A clusters', res.clusters.info)
    assert_in('B clusters', res.clusters.info)
    assert_in('A x B clusters', res.clusters.info)

    # no clusters
    res = testnd.anova('uts', 'B', sub="A=='a1'", ds=ds, samples=5,
                       pmin=0.05, mintime=0.02)
    repr(res)
    assert_in('v', res.clusters)
    assert_in('p', res.clusters)

    # all effects with clusters
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.1, mintime=0.02)
    assert_equal(set(res.clusters['effect'].cells), set(res.effects))

    # some effects with clusters, some without
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.37, mintime=0.02)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_dataobj_equal(res.clusters, res_.clusters)

    # test multi-effect results (with persistence)
    # UTS
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)
    repr(res)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    resr = pickle.loads(string)
    tf_clusters = resr.find_clusters(pmin=0.05)
    peaks = resr.find_peaks()
    assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))
    assert_dataobj_equal(peaks, res.find_peaks())
    assert_equal(tf_clusters.eval("p.min()"), peaks.eval("p.min()"))
    unmasked = resr.f[0]
    masked = resr.masked_parameter_map(effect=0, pmin=0.05)
    assert_array_equal(masked.x <= unmasked.x, True)

    # reproducibility
    res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    configure(n_workers=0)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    configure(n_workers=True)

    # permutation
    # _YIELD_ORIGINAL is a debugging flag that makes the permutation
    # generator yield the unpermuted data, so every entry of the null
    # distribution should equal the statistic of the original map.
    eelbrain._stats.permutation._YIELD_ORIGINAL = 1
    samples = 4
    # raw
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.parameter_map.abs().max())
    # TFCE
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.tfce_map.abs().max())
    # thresholded
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)
    clusters = res.find_clusters()
    for dist, effect in zip(res._cdist, res.effects):
        effect_idx = clusters.eval("effect == %r" % effect)
        vmax = clusters[effect_idx, 'v'].abs().max()
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, vmax)
    eelbrain._stats.permutation._YIELD_ORIGINAL = 0

    # 1d TFCE
    configure(n_workers=0)
    res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds,
                       tfce=True, samples=samples)
    configure(n_workers=True)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    assert_raises(ZeroVariance, testnd.anova, 'utsnd', 'A', ds=ds)
    assert_raises(ZeroVariance, testnd.anova, 'utsnd', 'A*B*rm', ds=ds)
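
# The test above toggles parallel processing with configure(n_workers=0) /
# configure(n_workers=True). A minimal sketch of a reusable helper for this
# pattern (hypothetical, not part of the eelbrain API) that restores parallel
# processing even when the guarded block raises:
from contextlib import contextmanager

@contextmanager
def serial_processing():
    "Run the enclosed block with eelbrain's parallel processing disabled."
    configure(n_workers=0)
    try:
        yield
    finally:
        configure(n_workers=True)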
def test_ttest_1samp():
    "Test testnd.ttest_1samp()"
    ds = datasets.get_uts(True)

    # no clusters
    res0 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds)
    assert_less(res0.p_uncorrected.min(), 0.05)
    repr0 = repr(res0)
    assert_in("'uts'", repr0)
    assert_not_in('clusters', repr0)
    assert_not_in('mintime', repr0)

    # sub as array
    res1 = testnd.ttest_1samp('uts', sub=ds.eval("A == 'a0'"), ds=ds)
    repr1 = repr(res1)
    assert_not_equal(repr1, repr0)

    # clusters without resampling
    res1 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds, samples=0,
                              pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert_equal(res1.clusters.n_cases, 1)
    assert_not_in('p', res1.clusters)
    repr1 = repr(res1)
    assert_in('clusters', repr1)
    assert_in('samples', repr1)
    assert_in('mintime', repr1)

    # persistence
    string = pickle.dumps(res1, pickle.HIGHEST_PROTOCOL)
    res1_ = pickle.loads(string)
    assert_equal(repr(res1_), repr1)
    assert_dataobj_equal(res1.p_uncorrected, res1_.p_uncorrected)

    # clusters with resampling
    res2 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=ds, samples=10,
                              pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert_equal(res2.clusters.n_cases, 1)
    assert_equal(res2.samples, 10)
    assert_in('p', res2.clusters)
    repr2 = repr(res2)
    assert_in('samples', repr2)

    # clusters with permutations
    dss = ds.sub("logical_and(A=='a0', B=='b0')")[:8]
    res3 = testnd.ttest_1samp('uts', sub="A == 'a0'", ds=dss, samples=10000,
                              pmin=0.05, tstart=0, tstop=0.6, mintime=0.05)
    assert_equal(res3.clusters.n_cases, 2)
    assert_equal(res3.samples, -1)
    assert_less(res3.clusters['p'].x.min(), 0.05)
    repr3 = repr(res3)
    assert_in('samples', repr3)

    # nd
    dss = ds.sub("A == 'a0'")
    res = testnd.ttest_1samp('utsnd', ds=dss, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, pmin=0.05, samples=1)
    res = testnd.ttest_1samp('utsnd', ds=dss, tfce=True, samples=1)

    # TFCE properties
    res = testnd.ttest_1samp('utsnd', sub="A == 'a0'", ds=ds, samples=1)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res = pickle.loads(string)
    tfce_clusters = res.find_clusters(pmin=0.05)
    peaks = res.find_peaks()
    assert_equal(tfce_clusters.eval("p.min()"), peaks.eval("p.min()"))
    masked = res.masked_parameter_map(pmin=0.05)
    assert_array_equal(masked.abs().x <= res.t.abs().x, True)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    ds['utsnd'].x[:, 2, 10] = 0.1
    res = testnd.ttest_1samp('utsnd', ds=ds)
    eq_(res.t.x[1, 10], 0.)
    assert_greater(res.t.x[2, 10], 1e10)
def test_celltable():
    "Test the Celltable class."
    ds = datasets.get_uts()
    ds['cat'] = Factor('abcd', repeat=15)

    ct = Celltable('Y', 'A', ds=ds)
    eq_(ct.n_cases, 60)
    eq_(ct.n_cells, 2)
    eq_(repr(ct), "Celltable(Y, A)")
    eq_(repr(Celltable(ds['Y'].x, 'A', ds=ds)), "Celltable(<ndarray>, A)")
    eq_(repr(Celltable(ds['Y'].x, ds['A'].x, ds=ds)),
        "Celltable(<ndarray>, <Factor>)")

    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.n_cells, 2)

    # cat argument
    ct = Celltable('Y', 'cat', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')
    assert_raises(ValueError, Celltable, 'Y', 'cat', cat=('c', 'e'), ds=ds)

    ct = Celltable('Y', 'A', match='rm', ds=ds)
    eq_(ct.n_cases, 30)
    assert np.all(ct.groups['a0'] == ct.groups['a1'])

    ct = Celltable('Y', 'cat', match='rm', cat=('c', 'b'), ds=ds)
    eq_(ct.n_cases, 30)
    eq_(ct.X[0], 'c')
    eq_(ct.X[-1], 'b')

    # catch unequal length
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', ds=ds)
    assert_raises(ValueError, Celltable, ds['Y', :-1], 'cat', match='rm',
                  ds=ds)

    # coercion of numerical X
    X = ds.eval("A == 'a0'")
    ct = Celltable('Y', X, cat=(None, None), ds=ds)
    eq_(('False', 'True'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    ct = Celltable('Y', X, cat=('True', 'False'), ds=ds)
    eq_(('True', 'False'), ct.cat)
    assert_array_equal(ct.data['True'], ds['Y', X])

    # test coercion of Y
    ct = Celltable(ds['Y'].x, 'A', ds=ds)
    assert_is_instance(ct.Y, np.ndarray)
    ct = Celltable(ds['Y'].x, 'A', ds=ds, coercion=asvar)
    assert_is_instance(ct.Y, Var)

    # test sub
    ds_sub = ds.sub("A == 'a0'")
    ct_sub = Celltable('Y', 'B', ds=ds_sub)
    ct = Celltable('Y', 'B', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # test sub with rm
    ct_sub = Celltable('Y', 'B', match='rm', ds=ds_sub)
    ct = Celltable('Y', 'B', match='rm', sub="A == 'a0'", ds=ds)
    assert_dataobj_equal(ct_sub.Y, ct.Y)

    # Interaction match
    ct = Celltable('Y', 'A', match='B % rm', ds=ds)
    ok_(ct.all_within)
    assert_dataobj_equal(combine((ct.data['a0'], ct.data['a1'])), ds['Y'])

    # test rm sorting
    ds = Dataset()
    ds['rm'] = Factor('abc', repeat=4)
    ds['Y'] = Var(np.arange(3.).repeat(4))
    ds['X'] = Factor('ab', repeat=2, tile=3)
    idx = np.arange(12)
    np.random.shuffle(idx)
    ds = ds[idx]
    ct = Celltable('Y', 'X', 'rm', ds=ds)
    assert_array_equal(ct.match, Factor('abc', tile=2))
    assert_array_equal(ct.Y, np.tile(np.arange(3.), 2))
    assert_array_equal(ct.X, Factor('ab', repeat=3))
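
# Typical downstream use of a Celltable, as a sketch relying only on the
# attributes exercised above (ct.cat and the ct.data dict); cell_means is a
# hypothetical helper, not part of the eelbrain API:
def cell_means(ct):
    "Return a {cell: mean} dict for a Celltable over a Var."
    return {cell: ct.data[cell].mean() for cell in ct.cat}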
def test_var():
    "Test Var objects"
    base = Factor('aabbcde')

    # initialization
    x = np.arange(4)
    y = Var(x)
    assert_array_equal(y, x)
    y = Var(x, repeat=2)
    assert_array_equal(y, x.repeat(2))
    y = Var(x, repeat=x)
    assert_array_equal(y, x.repeat(x))
    y = Var.from_dict(base, {'a': 5, 'e': 8}, default=0)
    assert_array_equal(y.x, [5, 5, 0, 0, 0, 0, 8])
    assert_raises(TypeError, Var, x, info=1)

    # basic operations
    info = {'a': 1}
    v = Var([1., 2., 3., -4.], 'v', info=info)
    c = 2
    v2 = Var([2., 2., 3., 3.], 'w', info=info)
    eq_(v.info, info)
    for op, iop, desc in OPERATORS:
        target = op(v.x, c)
        vtarget = op(v.x, v2.x)
        # op
        if desc == '+':
            w = v.copy()
            w.x = iop(w.x, c)
        else:
            w = op(v, c)
        eq_(w.info, {'a': 1, 'longname': 'v %s %s' % (desc, c)})
        assert_array_equal(w, target)
        # with Var
        w = op(v, v2)
        eq_(w.info, {'a': 1, 'longname': 'v %s w' % desc})
        assert_array_equal(w, vtarget)
        # i-op
        w = v.copy()
        w = iop(w, c)
        assert_array_equal(w, target)
        # i-op with Var
        w = v.copy()
        w = iop(w, v2)
        assert_array_equal(w, vtarget)

    # methods
    w = v.abs()
    eq_(w.info, {'a': 1, 'longname': 'abs(v)'})
    assert_array_equal(w, np.abs(v.x))
    x = w.log()
    eq_(x.info, {'a': 1, 'longname': 'log(abs(v))'})
    assert_array_equal(x, np.log(w.x))

    # assignment
    tgt1 = np.arange(10)
    tgt2 = np.tile(np.arange(5), 2)
    v = Var(np.arange(10))
    v[v > 4] = np.arange(5)
    assert_array_equal(v, tgt2)
    v[5:] = np.arange(5, 10)
    assert_array_equal(v, tgt1)
    v = Var(np.arange(10))
    v[v > 4] = Var(np.arange(5))
    assert_array_equal(v, tgt2)
    v[5:] = Var(np.arange(5, 10))
    assert_array_equal(v, tgt1)

    # .count()
    v = Var([1., 2., 1.11, 2., 1.11, 4.])
    assert_array_equal(v.count(), [0, 0, 0, 1, 1, 0])

    # .split()
    y = Var(np.arange(16))
    for i in range(1, 9):
        split = y.split(i)
        eq_(len(split.cells), i)

    # .as_factor()
    v = Var(np.arange(4))
    assert_dataobj_equal(v.as_factor(), Factor('0123'))
    assert_dataobj_equal(v.as_factor({0: 'a'}), Factor(['a', '', '', '']))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', (2, 3): 'b'}),
                         Factor('aabb'))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', 2: 'b', 'default': 'c'}),
                         Factor('aabc'))
    assert_dataobj_equal(v.as_factor({(0, 1): 'a', (2, 'default'): 'b'}),
                         Factor('aabb'))
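
# test_var() iterates over a module-level OPERATORS table defined elsewhere
# in this test module. A sketch of its assumed structure -- (binary operator,
# in-place operator, symbol) triples; the actual table may differ:
import operator

OPERATORS_SKETCH = [
    (operator.add, operator.iadd, '+'),
    (operator.sub, operator.isub, '-'),
    (operator.mul, operator.imul, '*'),
    (operator.truediv, operator.itruediv, '/'),
]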
def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6),
            Ordered('dim2', list(range(6)), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0,
                           pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0,
                           pmin=0.05, mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,
                             samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 4), sensor,
            Ordered('dim2', list(range(10)), 'unit'))
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))

    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = cdist._find_peaks(x)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)
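
# For reference, a numpy/scipy-only approximation of the peak criterion
# tested above: a nonzero cell counts as a peak if no cell in its dense 3x3x3
# grid neighborhood exceeds it. This ignores the custom sensor connectivity
# that _find_peaks uses, so it is a sketch of the idea, not an equivalent
# implementation:
from scipy import ndimage

def naive_peaks(x):
    "Boolean map: nonzero local maxima over a dense grid neighborhood."
    return np.equal(x, ndimage.maximum_filter(x, size=3)) & (x > 0)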