def test_anova_parc():
    """Test ANOVA with parc argument and source space data.

    Cluster results computed with ``parc='source'`` restricted to one label
    should equal the results of running the test on that label's data alone.
    """
    set_log_level('warning', 'mne')
    ds = datasets.get_mne_sample(src='ico', sub="side.isin(('L', 'R'))")
    # data restricted to two anatomical labels; y1/y2 are single-label subsets
    y = ds['src'].sub(source=('lateraloccipital-lh', 'cuneus-lh'))
    y1 = y.sub(source='lateraloccipital-lh')
    y2 = y.sub(source='cuneus-lh')
    # shared arguments for all anova calls below
    kwa = dict(ds=ds, tstart=0.2, tstop=0.3, samples=100)
    # parcellated test over both labels
    resp = testnd.anova(y, "side*modality", pmin=0.05, parc='source', **kwa)
    c1p = resp.find_clusters(source='lateraloccipital-lh')
    c2p = resp.find_clusters(source='cuneus-lh')
    # drop columns that only exist in the parcellated result before comparing
    del c1p['p_parc', 'id']
    del c2p['p_parc', 'id']
    # reference: a separate test per label
    res1 = testnd.anova(y1, "side*modality", pmin=0.05, **kwa)
    c1 = res1.find_clusters()
    del c1['id']
    res2 = testnd.anova(y2, "side*modality", pmin=0.05, **kwa)
    c2 = res2.find_clusters()
    del c2['id']
    assert_dataset_equal(c1p, c1)
    assert_dataset_equal(c2p, c2)
    # regression values; assumes the permutation sequence is deterministic for
    # this sample dataset — confirm if this fails after permutation changes
    assert_array_equal(c2['p'], [
        0.85, 0.88, 0.97, 0.75, 0.99, 0.99, 0.98, 0.0, 0.12, 0.88, 0.25,
        0.97, 0.34, 0.96
    ])
    # without multiprocessing
    configure(n_workers=0)
    ress = testnd.anova(y, "side*modality", pmin=0.05, parc='source', **kwa)
    c1s = ress.find_clusters(source='lateraloccipital-lh')
    c2s = ress.find_clusters(source='cuneus-lh')
    del c1s['p_parc', 'id']
    del c2s['p_parc', 'id']
    assert_dataset_equal(c1s, c1)
    assert_dataset_equal(c2s, c2)
    configure(n_workers=True)
    # parc but single label
    resp2 = testnd.anova(y2, "side*modality", pmin=0.05, parc='source', **kwa)
    c2sp = resp2.find_clusters(source='cuneus-lh')
    del c2sp['p_parc', 'id']
    assert_dataset_equal(c2sp, c2)
    # parc combined with TFCE is not defined and should raise
    assert_raises(NotImplementedError, testnd.anova, y, "side*modality",
                  tfce=True, parc='source', **kwa)
def test_ttest_rel():
    """Test testnd.ttest_rel().

    Covers repr, pickle round-trip, cell collapsing, and reproducibility of
    cluster/TFCE results with and without multiprocessing
    (``testnd.configure``).
    """
    ds = datasets.get_uts(True)
    # basic: repr encodes the test spec, n, and the smallest p-value
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                           ds=ds, samples=100)
    eq_(repr(res), "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), "
        "'rm' (n=15), samples=100, p >= 0.000>")
    # persistence: result survives a pickle round-trip unchanged
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    # collapsing cells: same comparison specified through a single factor
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)
    # reproducibility across repeated runs ...
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    # ... and with multiprocessing disabled (testnd.configure(0) — presumably
    # sets the worker count; confirm against testnd.configure)
    testnd.configure(0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    testnd.configure(-1)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS: single- vs multi-process results must agree
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    testnd.configure(0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
def test_io_pickle():
    """Round-trip a Dataset through a pickle file on disk."""
    ds = datasets.get_uts()
    ds.info['info'] = "Some very useful information about the Dataset"
    tmp_dir = tempfile.mkdtemp()
    try:
        path = os.path.join(tmp_dir, 'test.pickled')
        with open(path, 'wb') as fh:
            pickle.dump(ds, fh, protocol=pickle.HIGHEST_PROTOCOL)
        with open(path, 'rb') as fh:
            reloaded = pickle.load(fh)
    finally:
        # remove the temporary directory even if pickling fails
        shutil.rmtree(tmp_dir)
    assert_dataset_equal(ds, reloaded)
def test_ttest_rel():
    """Test testnd.ttest_rel().

    Covers pickle round-trip, cell collapsing, and reproducibility of
    cluster/TFCE results with and without multiprocessing
    (``testnd.configure``).
    """
    ds = datasets.get_uts(True)
    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                           ds=ds, samples=100)
    repr(res)  # smoke test only; repr content is not checked here
    # persistence: result survives a pickle round-trip unchanged
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    # collapsing cells: same comparison specified through a single factor
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)
    # reproducibility across repeated runs ...
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    # ... and with multiprocessing disabled (testnd.configure(0) — presumably
    # sets the worker count; confirm against testnd.configure)
    testnd.configure(0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    testnd.configure(-1)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS: single- vs multi-process results must agree
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    testnd.configure(0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
def test_r():
    """Test interaction with R through rpy2.

    Loads R's built-in ``sleep`` dataset, checks the converted values, then
    pushes the Dataset back into R and re-imports it.
    """
    from rpy2.robjects import r

    r("data(sleep)")
    ds = Dataset.from_r("sleep")
    eq_(ds.name, 'sleep')
    # reference values from R's built-in sleep dataset
    extra = (0.7, -1.6, -0.2, -1.2, -0.1, 3.4, 3.7, 0.8, 0.0, 2.0,
             1.9, 0.8, 1.1, 0.1, -0.1, 4.4, 5.5, 1.6, 4.6, 3.4)
    assert_array_equal(ds.eval('extra'), extra)
    # list(map(...)): on Python 3, map() returns an iterator which does not
    # support `* 2`, and xrange() no longer exists
    assert_array_equal(ds.eval('ID'), list(map(str, range(1, 11))) * 2)
    assert_array_equal(ds.eval('group'), ['1'] * 10 + ['2'] * 10)
    # test putting
    ds.to_r('sleep_copy')
    ds_copy = Dataset.from_r('sleep_copy')
    assert_dataset_equal(ds_copy, ds)
def test_r():
    """Round-trip R's built-in 'sleep' dataset through rpy2."""
    from rpy2.robjects import r

    r("data(sleep)")
    ds = Dataset.from_r("sleep")
    eq_(ds.name, 'sleep')
    # reference values from R's sleep dataset
    extra = (0.7, -1.6, -0.2, -1.2, -0.1, 3.4, 3.7, 0.8, 0.0, 2.0,
             1.9, 0.8, 1.1, 0.1, -0.1, 4.4, 5.5, 1.6, 4.6, 3.4)
    assert_array_equal(ds.eval('extra'), extra)
    assert_array_equal(ds.eval('ID'), [str(i) for i in range(1, 11)] * 2)
    assert_array_equal(ds.eval('group'), ['1'] * 10 + ['2'] * 10)
    # push the Dataset back into R and re-import it; nothing should change
    ds.to_r('sleep_copy')
    reimported = Dataset.from_r('sleep_copy')
    assert_dataset_equal(reimported, ds)
def test_anova_parc():
    """Test ANOVA with parc argument and source space data.

    Cluster results computed with ``parc='source'`` restricted to one label
    should equal the results of running the test on that label's data alone.
    """
    set_log_level('warning', 'mne')
    ds = datasets.get_mne_sample(src='ico', sub="side.isin(('L', 'R'))")
    # data restricted to two anatomical labels; y1/y2 are single-label subsets
    y = ds['src'].sub(source=('lateraloccipital-lh', 'cuneus-lh'))
    y1 = y.sub(source='lateraloccipital-lh')
    y2 = y.sub(source='cuneus-lh')
    # shared arguments for all anova calls below
    kwa = dict(ds=ds, tstart=0.2, tstop=0.3, samples=100)
    # parcellated test over both labels
    resp = testnd.anova(y, "side*modality", pmin=0.05, parc='source', **kwa)
    c1p = resp.find_clusters(source='lateraloccipital-lh')
    c2p = resp.find_clusters(source='cuneus-lh')
    # drop columns that only exist in the parcellated result before comparing
    del c1p['p_parc', 'id']
    del c2p['p_parc', 'id']
    # reference: a separate test per label
    res1 = testnd.anova(y1, "side*modality", pmin=0.05, **kwa)
    c1 = res1.find_clusters()
    del c1['id']
    res2 = testnd.anova(y2, "side*modality", pmin=0.05, **kwa)
    c2 = res2.find_clusters()
    del c2['id']
    assert_dataset_equal(c1p, c1)
    assert_dataset_equal(c2p, c2)
    # regression values; assumes the permutation sequence is deterministic for
    # this sample dataset — confirm if this fails after permutation changes
    assert_array_equal(c2['p'], [0.85, 0.88, 0.97, 0.75, 0.99, 0.99, 0.98,
                                 0.0, 0.12, 0.88, 0.25, 0.97, 0.34, 0.96])
    # without multiprocessing (testnd.configure(0) — presumably sets the
    # worker count; confirm against testnd.configure)
    testnd.configure(0)
    ress = testnd.anova(y, "side*modality", pmin=0.05, parc='source', **kwa)
    c1s = ress.find_clusters(source='lateraloccipital-lh')
    c2s = ress.find_clusters(source='cuneus-lh')
    del c1s['p_parc', 'id']
    del c2s['p_parc', 'id']
    assert_dataset_equal(c1s, c1)
    assert_dataset_equal(c2s, c2)
    testnd.configure(-1)
    # parc but single label
    resp2 = testnd.anova(y2, "side*modality", pmin=0.05, parc='source', **kwa)
    c2sp = resp2.find_clusters(source='cuneus-lh')
    del c2sp['p_parc', 'id']
    assert_dataset_equal(c2sp, c2)
    # parc combined with TFCE is not defined and should raise
    assert_raises(NotImplementedError, testnd.anova, y, "side*modality",
                  tfce=True, parc='source', **kwa)
def test_io_txt():
    """Write a Dataset to a text file and read it back with load.tsv()."""
    ds = datasets.get_uv()
    # Var that has integer values as float
    ds['intflt'] = ds.eval('intvar * 1.')
    ds['intflt'].name = 'intflt'
    # round-trip through a temporary text file
    out_dir = tempfile.mkdtemp()
    try:
        out_file = os.path.join(out_dir, 'test.txt')
        ds.save_txt(out_file)
        reloaded = load.tsv(out_file)
    finally:
        # remove the temporary directory even if save/load fails
        shutil.rmtree(out_dir)
    assert_dataset_equal(ds, reloaded, decimal=6)
def test_dataset_indexing():
    """Test Dataset indexing.

    Datasets accept both ``[key, row]`` and ``[row, key]`` orders for
    reading, assigning and deleting, plus tuple-of-keys selection of
    sub-Datasets.
    """
    ds = datasets.get_uv()
    # indexing values: both argument orders return the same item
    eq_(ds['A', 1], ds['A'][1])
    eq_(ds[1, 'A'], ds['A'][1])
    # indexing variables: full and sliced column access
    assert_dataobj_equal(ds[:, 'A'], ds['A'])
    assert_dataobj_equal(ds['A', :], ds['A'])
    assert_dataobj_equal(ds[:10, 'A'], ds['A'][:10])
    assert_dataobj_equal(ds['A', :10], ds['A'][:10])
    # new Dataset through indexing with a tuple of keys
    ds2 = Dataset()
    ds2['A'] = ds['A']
    assert_dataset_equal(ds[('A',)], ds2)
    ds2['B'] = ds['B']
    assert_dataset_equal(ds['A', 'B'], ds2)
    assert_dataset_equal(ds[('A', 'B'), :10], ds2[:10])
    assert_dataset_equal(ds[:10, ('A', 'B')], ds2[:10])
    # assigning a single value, in either index order
    ds[2, 'A'] = 'hello'
    eq_(ds[2, 'A'], 'hello')
    ds['A', 2] = 'not_hello'
    eq_(ds[2, 'A'], 'not_hello')
    # assigning new factor: a str broadcasts across all cases
    ds['C', :] = 'c'
    ok_(np.all(ds.eval("C == 'c'")))
    # assigning new Var: a scalar broadcasts across all cases
    ds['D1', :] = 5.
    ds[:, 'D2'] = 5.
    assert_array_equal(ds['D1'], 5)
    assert_array_equal(ds['D2'], 5)
    # test illegal names: non-identifier keys raise ValueError
    f = Factor('aaabbb')
    assert_raises(ValueError, ds.__setitem__, '%dsa', f)
    assert_raises(ValueError, ds.__setitem__, '432', f)
    assert_raises(ValueError, ds.__setitem__, ('%dsa', slice(None)), 'value')
    assert_raises(ValueError, ds.__setitem__, (slice(None), '%dsa'), 'value')
    assert_raises(ValueError, ds.__setitem__, ('432', slice(None)), 4.)
    assert_raises(ValueError, ds.__setitem__, (slice(None), '432'), 4.)
    # deleting items: single key and tuple of keys
    del ds['A']
    ok_('A' not in ds)
    assert_raises(KeyError, ds.__getitem__, 'A')
    del ds['B', 'rm']
    ok_('B' not in ds and 'rm' not in ds)
def test_dataset_sorting():
    """Check Dataset.sorted() (returns copy) and Dataset.sort() (in-place)."""
    values = np.arange(10)
    ds = Dataset()
    ds['v'] = Var(values)
    ds['f'] = Factor(values)
    # randomly permute the Dataset's cases
    perm = values.copy()
    np.random.shuffle(perm)
    shuffled = ds[perm]
    # sorting by the Var, ascending, should return a sorted copy
    sorted_copy = shuffled.sorted('v')
    assert_dataset_equal(sorted_copy, ds, "Copy sorted by Var, ascending")
    # sorting by the Factor, descending, should modify the Dataset in place
    shuffled.sort('f', descending=True)
    assert_dataset_equal(shuffled, ds[::-1],
                         "In-place sorted by Factor, descending")
def test_align():
    """Testing align() and align1() functions."""
    ds = datasets.get_uv()
    # index the dataset (adds an 'index' column used to verify alignment)
    ds.index()
    ds['aindex'] = ds.eval("A.enumerate_cells()")
    # subsets with known 'index' values
    idx4 = np.arange(0, ds.n_cases, 4)
    idx4i = idx4[::-1]
    ds2 = ds.sub(np.arange(0, ds.n_cases, 2))
    # shuffle the whole dataset
    shuffle_index = np.arange(ds.n_cases)
    np.random.shuffle(shuffle_index)
    ds_shuffled = ds[shuffle_index]
    # align1: align Dataset to index
    dsa = align1(ds2, idx4)
    assert_array_equal(dsa['index'], idx4, "align1() failure")
    dsa = align1(ds2, idx4i)
    assert_array_equal(dsa['index'], idx4i, "align1() failure")
    # d_idx as Var
    dsa = align1(ds2[::2], idx4, idx4i)
    assert_array_equal(dsa['index'], idx4i, "align1() failure")
    # this combination of index arguments is invalid and must raise
    assert_raises(ValueError, align1, ds2, idx4, idx4i)
    # Factor index: the full ds raises — presumably because 'rm' values
    # repeat across the whole dataset but are unique within ds[:20]; confirm
    assert_raises(ValueError, align1, ds, ds['rm', ::-1], 'rm')
    fds = ds[:20]
    dsa = align1(fds, fds['rm', ::-1], 'rm')
    assert_array_equal(dsa['index'], np.arange(19, -1, -1), "align1 Factor")
    # align two datasets
    dsa1, dsa2 = align(ds, ds2)
    assert_array_equal(dsa1['index'], dsa2['index'], "align() failure")
    dsa1, dsa2 = align(ds, ds2[::-1])
    assert_array_equal(dsa1['index'], dsa2['index'], "align() failure")
    dsa1, dsa2 = align(ds, ds_shuffled)
    assert_dataset_equal(dsa1, dsa2)
    # align using categorial (interaction of A and aindex, in either order)
    dsa1, dsa2 = align(ds, ds_shuffled, 'A % aindex')
    assert_dataset_equal(dsa1, dsa2)
    dsa1, dsa2 = align(ds, ds_shuffled, 'aindex % A')
    assert_dataset_equal(dsa1, dsa2)
def test_anova():
    """Test testnd.anova().

    Covers repr regression strings, pickling, threshold-free and thresholded
    cluster tests, multi-effect results, reproducibility without
    multiprocessing, the permutation distribution (via the
    ``_YIELD_ORIGINAL`` test hook), and zero-variance input.
    """
    ds = datasets.get_uts(True)
    testnd.anova('utsnd', 'A*B', ds=ds)
    # smoke-test the main parameter combinations
    for samples in (0, 2):
        logging.info("TEST: samples=%r" % samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)
    # repr regression strings (cluster counts per effect)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=0, pmin=0.05)
    eq_(repr(res), "<anova 'utsnd', 'A*B*rm', samples=0, pmin=0.05, "
        "'A': 17 clusters, 'B': 20 clusters, 'A x B': 22 clusters>")
    res = testnd.anova('utsnd', 'A*B*rm', match='rm', ds=ds, samples=2,
                       pmin=0.05)
    eq_(repr(res), "<anova 'utsnd', 'A*B*rm', match='rm', samples=2, "
        "pmin=0.05, "
        "'A': 17 clusters, p >= 0.000, 'B': 20 clusters, p >= 0.000, "
        "'A x B': 22 clusters, p >= 0.000>")
    # persistence: repr is preserved through a pickle round-trip
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    # threshold-free
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)
    repr(res)
    assert_in('A clusters', res.clusters.info)
    assert_in('B clusters', res.clusters.info)
    assert_in('A x B clusters', res.clusters.info)
    # no clusters: result still has the expected columns
    res = testnd.anova('uts', 'B', sub="A=='a1'", ds=ds, samples=5,
                       pmin=0.05, mintime=0.02)
    repr(res)
    assert_in('v', res.clusters)
    assert_in('p', res.clusters)
    # all effects with clusters
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.1, mintime=0.02)
    assert_equal(set(res.clusters['effect'].cells), set(res.effects))
    # some effects with clusters, some without
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.37, mintime=0.02)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_dataobj_equal(res.clusters, res_.clusters)
    # test multi-effect results (with persistence)
    # UTS
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)
    repr(res)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    resr = pickle.loads(string)
    tf_clusters = resr.find_clusters(pmin=0.05)
    peaks = resr.find_peaks()
    assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))
    assert_dataobj_equal(peaks, res.find_peaks())
    assert_equal(tf_clusters.eval("p.min()"), peaks.eval("p.min()"))
    # masking must never increase values beyond the unmasked map
    unmasked = resr.f[0]
    masked = resr.masked_parameter_map(effect=0, pmin=0.05)
    assert_array_equal(masked.x <= unmasked.x, True)
    # reproducibility: same clusters across runs and without multiprocessing
    res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    configure(n_workers=0)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    configure(n_workers=True)
    # permutation: _YIELD_ORIGINAL makes each permutation reproduce the
    # original data — presumably; confirm in eelbrain._stats.permutation —
    # so every distribution entry equals the observed maximum statistic
    eelbrain._stats.permutation._YIELD_ORIGINAL = 1
    samples = 4
    # raw
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.parameter_map.abs().max())
    # TFCE
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.tfce_map.abs().max())
    # thresholded
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)
    clusters = res.find_clusters()
    for dist, effect in izip(res._cdist, res.effects):
        effect_idx = clusters.eval("effect == %r" % effect)
        vmax = clusters[effect_idx, 'v'].abs().max()
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, vmax)
    # restore normal permutation behavior
    eelbrain._stats.permutation._YIELD_ORIGINAL = 0
    # 1d TFCE
    configure(n_workers=0)
    res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds,
                       tfce=True, samples=samples)
    configure(n_workers=True)
    # zero variance in the data should raise
    ds['utsnd'].x[:, 1, 10] = 0.
    assert_raises(ZeroVariance, testnd.anova, 'utsnd', 'A', ds=ds)
    assert_raises(ZeroVariance, testnd.anova, 'utsnd', 'A*B*rm', ds=ds)
def test_ttest_rel():
    """Test testnd.ttest_rel().

    Covers repr, the alternate two-variable argspec, pickle round-trip, cell
    collapsing, reproducibility with/without multiprocessing, and
    zero-variance input.
    """
    ds = datasets.get_uts(True)
    # basic: repr encodes the test spec, n, and the smallest p-value
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                           ds=ds, samples=100)
    eq_(repr(res), "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), "
        "'rm' (n=15), samples=100, p=.000>")
    # alternate argspec: two paired variables instead of y + cell definition
    ds1 = Dataset()
    ds1['a1b1'] = ds.eval("uts[A%B == ('a1', 'b1')]")
    ds1['a0b0'] = ds.eval("uts[A%B == ('a0', 'b0')]")
    res1 = testnd.ttest_rel('a1b1', 'a0b0', ds=ds1, samples=100)
    assert_dataobj_equal(res1.t, res.t)
    eq_(repr(res1), "<ttest_rel 'a1b1', 'a0b0' (n=15), samples=100, p=.000>")
    # persistence: result survives a pickle round-trip unchanged
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    # collapsing cells: same comparison specified through a single factor
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)
    # reproducibility across repeated runs ...
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    # ... and without multiprocessing
    configure(n_workers=0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    configure(n_workers=True)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS: single- vs multi-process results must agree
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    configure(n_workers=0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    configure(n_workers=0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    configure(n_workers=True)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
    # zero variance in one bin should yield t == 0 there, not an error
    ds['utsnd'].x[:, 1, 10] = 0.
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds)
    eq_(res.t.x[1, 10], 0)
def test_anova():
    """Test testnd.anova().

    Covers parameter combinations, pickling, threshold-free and thresholded
    cluster tests, multi-effect results, reproducibility without
    multiprocessing, and the permutation distribution (via the
    ``_YIELD_ORIGINAL`` test hook).
    """
    ds = datasets.get_uts(True)
    testnd.anova('utsnd', 'A*B', ds=ds)
    # smoke-test the main parameter combinations
    for samples in (0, 2):
        logging.info("TEST: samples=%r" % samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, pmin=0.05)
        testnd.anova('utsnd', 'A*B', ds=ds, samples=samples, tfce=True)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=0, pmin=0.05)
    repr(res)  # smoke test only; repr content is not checked here
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=2, pmin=0.05)
    repr(res)
    # persistence: repr is preserved through a pickle round-trip
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    # threshold-free
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=10)
    repr(res)
    assert_in('A clusters', res.clusters.info)
    assert_in('B clusters', res.clusters.info)
    assert_in('A x B clusters', res.clusters.info)
    # no clusters: result still has the expected columns
    res = testnd.anova('uts', 'B', sub="A=='a1'", ds=ds, samples=5,
                       pmin=0.05, mintime=0.02)
    repr(res)
    assert_in('v', res.clusters)
    assert_in('p', res.clusters)
    # all effects with clusters
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.1, mintime=0.02)
    assert_equal(set(res.clusters['effect'].cells), set(res.effects))
    # some effects with clusters, some without
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5, pmin=0.05,
                       tstart=0.37, mintime=0.02)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_dataobj_equal(res.clusters, res_.clusters)
    # test multi-effect results (with persistence)
    # UTS
    res = testnd.anova('uts', 'A*B*rm', ds=ds, samples=5)
    repr(res)
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    resr = pickle.loads(string)
    tf_clusters = resr.find_clusters(pmin=0.05)
    peaks = resr.find_peaks()
    assert_dataobj_equal(tf_clusters, res.find_clusters(pmin=0.05))
    assert_dataobj_equal(peaks, res.find_peaks())
    assert_equal(tf_clusters.eval("p.min()"), peaks.eval("p.min()"))
    # masking must never increase values beyond the unmasked map
    unmasked = resr.f[0]
    masked = resr.masked_parameter_map(effect=0, pmin=0.05)
    assert_array_equal(masked.x <= unmasked.x, True)
    # reproducibility: same clusters across runs and without multiprocessing
    res0 = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    testnd.configure(0)
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=5)
    assert_dataset_equal(res.clusters, res0.clusters)
    testnd.configure(-1)
    # permutation: _YIELD_ORIGINAL makes each permutation reproduce the
    # original data — presumably; confirm in eelbrain._stats.permutation —
    # so every distribution entry equals the observed maximum statistic
    eelbrain._stats.permutation._YIELD_ORIGINAL = 1
    samples = 4
    # raw
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.parameter_map.abs().max())
    # TFCE
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, tfce=True, samples=samples)
    for dist in res._cdist:
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, dist.tfce_map.abs().max())
    # thresholded
    res = testnd.anova('utsnd', 'A*B*rm', ds=ds, pmin=0.05, samples=samples)
    clusters = res.find_clusters()
    for dist, effect in izip(res._cdist, res.effects):
        effect_idx = clusters.eval("effect == %r" % effect)
        vmax = clusters[effect_idx, 'v'].abs().max()
        eq_(len(dist.dist), samples)
        assert_array_equal(dist.dist, vmax)
    # restore normal permutation behavior
    eelbrain._stats.permutation._YIELD_ORIGINAL = 0
    # 1d TFCE
    testnd.configure(0)
    res = testnd.anova('utsnd.rms(time=(0.1, 0.3))', 'A*B*rm', ds=ds,
                       tfce=True, samples=samples)
    testnd.configure(-1)