def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    repr(res)
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    eq_(res._cdist.n_clusters, 10)
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    eq_(repr(res), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    eq_(repr(res_), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    eq_(res._cdist.n_clusters, 10)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    assert_raises(ZeroVariance, testnd.ttest_ind, 'utsnd', 'A', ds=ds)
def test_merged_temporal_cluster_dist():
    "Test use of _MergedTemporalClusterDist with testnd test results"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts(seed=42)

    anova_kw = dict(Y='uts', X='A*B*rm', pmin=0.05, samples=10)
    ttest_kw = dict(Y='uts', X='A', c1='a1', c0='a0', pmin=0.05, samples=10)
    contrast_kw = dict(Y='uts', X='A', contrast='a1>a0', pmin=0.05, samples=10)

    def test_merged(res1, res2):
        merged_dist = _MergedTemporalClusterDist([res1._cdist, res2._cdist])
        if isinstance(res1, testnd.anova):
            assert_equal(len(merged_dist.dist), len(res1.effects))
            for effect, dist in merged_dist.dist.iteritems():
                assert_in(effect, res1.effects)
                assert_equal(len(dist), res1.samples)
        else:
            assert_equal(len(merged_dist.dist), res1.samples)
        res1_clusters = merged_dist.correct_cluster_p(res1)
        res2_clusters = merged_dist.correct_cluster_p(res2)
        for clusters in [res1_clusters, res2_clusters]:
            assert_in('p_parc', clusters)
            for cl in clusters.itercases():
                assert_greater_equal(cl['p_parc'], cl['p'])

    # multi-effect
    res1 = testnd.anova(ds=ds1, **anova_kw)
    res2 = testnd.anova(ds=ds2, **anova_kw)
    test_merged(res1, res2)

    # ttest_rel
    res1 = testnd.ttest_rel(ds=ds1, match='rm', **ttest_kw)
    res2 = testnd.ttest_rel(ds=ds2, match='rm', **ttest_kw)
    test_merged(res1, res2)

    # ttest_ind
    res1 = testnd.ttest_ind(ds=ds1, **ttest_kw)
    res2 = testnd.ttest_ind(ds=ds2, **ttest_kw)
    test_merged(res1, res2)

    # ttest_1samp
    res1 = testnd.ttest_1samp('uts', ds=ds1, pmin=0.05, samples=10)
    res2 = testnd.ttest_1samp('uts', ds=ds2, pmin=0.05, samples=10)
    test_merged(res1, res2)

    # t_contrast_rel
    res1 = testnd.t_contrast_rel(ds=ds1, match='rm', **contrast_kw)
    res2 = testnd.t_contrast_rel(ds=ds2, match='rm', **contrast_kw)
    test_merged(res1, res2)
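# Context for the helper above (descriptive comment only; inferred from the calls made
# in the test rather than from separate documentation): _MergedTemporalClusterDist pools
# the permutation distributions of several cluster-based results so that cluster
# p-values can be corrected across all of them. correct_cluster_p(res) returns the
# cluster table of one result with an added 'p_parc' column holding the pooled-correction
# p-values; these can never be smaller than the per-test 'p', which is what
# test_merged() asserts.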
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray('utsnd', ds=ds)
    assert repr(p) == "<TopoArray: utsnd>"
    p.set_topo_t(0, 0.2)
    p.close()
    p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2)
    p.close()
    p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4)
    assert repr(p) == "<TopoArray: utsnd ~ A x B>"
    p.close()

    # results
    res = testnd.ttest_ind('utsnd', 'A', ds=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)
    p = plot.TopoArray(res)
    assert repr(p) == "<TopoArray: a0, a1, a0 - a1>"
    p.set_topo_t(0, 0.)
    p.close()
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray("utsnd", ds=ds, show=False)
    p.set_topo_t(0, 0.2)
    p.close()
    p = plot.TopoArray("utsnd", ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    p = plot.TopoArray("utsnd", "A%B", ds=ds, axw=4, show=False)
    p.close()

    # results
    res = testnd.ttest_ind("utsnd", "A", ds=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)
    p = plot.TopoArray(res, show=False)
    p.set_topo_t(0, 0.0)
    p.close()
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, samples=0)
    assert repr(res) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>"
    assert res.p_uncorrected.min() < 0.05
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30), samples=0>"
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # alternate argspec
    res_ = testnd.ttest_ind("uts[A == 'a1']", "uts[A == 'a0']", ds=ds, samples=0)
    assert repr(res_) == "<ttest_ind 'uts' (n=30), 'uts' (n=30), samples=0>"
    assert_dataobj_equal(res_.t, res.t)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    assert res._cdist.n_clusters == 10

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res_zv = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, samples=0)
    assert_array_equal(res_zv.t.x[0], res.t.x[0])
    assert res_zv.t.x[1, 10] == 0.

    # argument mismatch
    with pytest.raises(ValueError):
        testnd.ttest_ind(ds['utsnd'], ds[:-1, 'A'], samples=0)
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray('utsnd', ds=ds, show=False)
    p.set_topo_t(0, 0.2)
    p.close()
    p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4, show=False)
    p.close()

    # results
    res = testnd.ttest_ind('utsnd', 'A', ds=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)
    p = plot.TopoArray(res, show=False)
    p.set_topo_t(0, 0.)
    p.close()
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray('utsnd', ds=ds)
    assert repr(p) == "<TopoArray: utsnd>"
    p.set_topo_t(0, 0.2)
    p.close()
    p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2)
    p.close()
    p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4)
    assert repr(p) == "<TopoArray: utsnd ~ A x B>"
    p.close()

    # results
    res = testnd.ttest_ind('utsnd', 'A', ds=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)
    p = plot.TopoArray(res)
    assert repr(p) == "<TopoArray: a0, a1, difference>"
    p.set_topo_t(0, 0.)
    p.close()
def test_result_report():
    "Test result_report function for different Results"
    ds = datasets.get_uts(True)
    sds = ds.sub("B == 'b0'")
    for y in ('uts',  # time
              "utsnd.summary(time=(0.25, 0.35))",  # sensor
              'utsnd',  # sensor x time
              ):
        dims = tuple(dim.name for dim in ds.eval(y).dims[1:])
        logging.info("y=%s %s", y, dims)
        kwargs = dict(pmin=0.1, samples=100)
        if 'time' in dims:
            kwargs['tstart'] = 0.2
            kwargs['tstop'] = 0.4

        for match in (None, 'rm'):
            logging.info(" match=%s", match)
            res = testnd.ttest_1samp(y, match=match, ds=sds, **kwargs)
            rep = result_report(res, ds)
            html(rep)

        res = testnd.ttest_ind(y, 'A', ds=sds, **kwargs)
        rep = result_report(res, ds)
        html(rep)

        res = testnd.ttest_rel(y, 'A', ds=sds, match='rm', **kwargs)
        rep = result_report(res, sds)
        html(rep)

        res = testnd.anova(y, 'A * B', ds=ds, **kwargs)
        rep = result_report(res, ds)
        html(rep)

        res = testnd.anova(y, 'A * rm', ds=sds, match='rm', **kwargs)
        rep = result_report(res, ds)
        html(rep)
def test_result_report():
    "Test result_report function for different Results"
    ds = datasets.get_uts(True)
    sds = ds.sub("B == 'b0'")
    ys = [
        'uts',  # time
        "utsnd.summary(time=(0.25, 0.35))",  # sensor
        'utsnd',  # sensor x time
    ]
    for y in ys:
        y_obj = sds.eval(y)
        kwargs = dict(pmin=0.1, samples=100)
        if y_obj.has_dim('time'):
            kwargs['tstart'] = 0.2
            kwargs['tstop'] = 0.4

        for match in (None, 'rm'):
            logging.info(" match=%s", match)
            res = testnd.ttest_1samp(y, match=match, ds=sds, **kwargs)
            rep = result_report(res, ds)
            html(rep)

        res = testnd.ttest_ind(y, 'A', ds=sds, **kwargs)
        rep = result_report(res, ds)
        html(rep)

        res = testnd.ttest_rel(y, 'A', ds=sds, match='rm', **kwargs)
        rep = result_report(res, sds)
        html(rep)

        res = testnd.anova(y, 'A * B', ds=ds, **kwargs)
        rep = result_report(res, ds)
        html(rep)

        res = testnd.anova(y, 'A * rm', ds=sds, match='rm', **kwargs)
        rep = result_report(res, ds)
        html(rep)
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    assert repr(res) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>"
    assert res.p_uncorrected.min() < 0.05
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>"
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # alternate argspec
    res_ = testnd.ttest_ind("uts[A == 'a1']", "uts[A == 'a0']", ds=ds)
    assert repr(res_) == "<ttest_ind 'uts' (n=30), 'uts' (n=30)>"
    assert_dataobj_equal(res_.t, res.t)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    assert res._cdist.n_clusters == 10

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    res_zv = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds)
    assert_array_equal(res_zv.t.x[0], res.t.x[0])
    assert res_zv.t.x[1, 10] == 0.

    # argument mismatch
    with pytest.raises(ValueError):
        testnd.ttest_ind(ds['utsnd'], ds[:-1, 'A'])
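# Illustrative sketch (not part of the test suite): the workflow exercised by the test
# above, written out as it might be run interactively. It relies only on calls that
# also appear in the surrounding tests (testnd.ttest_ind with pmin/samples,
# find_clusters, masked_parameter_map, plot.TopoArray); the top-level import assumes
# these tests target the eelbrain package, as the module names suggest.
if __name__ == '__main__':
    from eelbrain import datasets, plot, testnd

    ds = datasets.get_uts(True)
    # independent-samples t-test with cluster-based permutation correction
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=100)
    clusters = res.find_clusters(0.05)     # table of clusters with corrected p <= .05
    t_masked = res.masked_parameter_map()  # t-map masked outside significant clusters
    p = plot.TopoArray(res, show=False)    # condition means and their difference
    p.close()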
def test_vector():
    """Test vector tests"""
    # single vector
    ds = datasets.get_uv(vector=True)
    res = testnd.Vector('v[:40]', ds=ds, samples=10)
    assert res.p == 0.0
    res = testnd.Vector('v[40:]', ds=ds, samples=10)
    assert res.p == 1.0

    # single vector with norm stat
    res_t = testnd.Vector('v[:40]', ds=ds, samples=10, norm=True)
    assert res_t.p == 0.0
    res_t = testnd.Vector('v[40:]', ds=ds, samples=10, norm=True)
    assert res_t.p == 1.0

    # non-space tests should raise error
    with pytest.raises(WrongDimension):
        testnd.ttest_1samp('v', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.ttest_rel('v', 'A', match='rm', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.ttest_ind('v', 'A', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.t_contrast_rel('v', 'A', 'a0 > a1', 'rm', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.corr('v', 'fltvar', ds=ds)
    with pytest.raises(WrongDimension):
        testnd.anova('v', 'A * B', ds=ds)

    # vector in time
    ds = datasets.get_uts(vector3d=True)
    v1 = ds[30:, 'v3d']
    v2 = ds[:30, 'v3d']
    vd = v1 - v2
    res = testnd.Vector(vd, samples=10)
    assert res.p.min() == 0.2
    difference = res.masked_difference(0.5)
    assert difference.x.mask.sum() == 288
    # diff related
    resd = testnd.VectorDifferenceRelated(v1, v2, samples=10)
    assert_dataobj_equal(resd.p, res.p, name=False)
    assert_dataobj_equal(resd.t2, res.t2, name=False)
    # diff independent
    res = VectorDifferenceIndependent(v1, v2, samples=10, norm=True)
    assert_dataobj_equal(res.difference, v1.mean('case') - v2.mean('case'), name=False)
    assert res.p.max() == 1
    assert res.p.min() == 0
    # with mp
    res = testnd.Vector(v1, samples=10)
    assert res.p.min() == 0.4
    # without mp
    configure(n_workers=0)
    res0 = testnd.Vector(v1, samples=10)
    assert_array_equal(np.sort(res0._cdist.dist), np.sort(res._cdist.dist))
    configure(n_workers=True)
    # time window
    res = testnd.Vector(v2, samples=10, tstart=0.1, tstop=0.4)
    assert res.p.min() == 0.3
    difference = res.masked_difference(0.5)
    assert difference.x.mask.sum() == 294

    # vector in time with norm stat
    res = testnd.Vector(vd, samples=10, norm=True)
    assert res.p.min() == 0
    difference = res.masked_difference()
    assert difference.x.mask.sum() == 297

    resd = testnd.VectorDifferenceRelated(v1, v2, samples=10, norm=True)
    assert_dataobj_equal(resd.p, res.p, name=False)
    assert_dataobj_equal(resd.difference, res.difference, name=False)

    v_small = v2 / 100
    res = testnd.Vector(v_small, tfce=True, samples=10, norm=True)
    assert 'WARNING' in repr(res)
    res = testnd.Vector(v_small, tfce=0.1, samples=10)
    assert res.p.min() == 0.0
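# Note on the multiprocessing block above (descriptive comment only): configure(n_workers=0)
# switches the permutation test to a single process, and configure(n_workers=True) restores
# the default. Since worker scheduling may change the order in which permutations are
# collected, the test compares the single-process and multiprocessing permutation
# distributions after sorting rather than element-wise.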
def test_source_estimate():
    "Test SourceSpace dimension"
    mne.set_log_level('warning')
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')

    # test auto-conversion
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])

    # source space clustering
    res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10)
    assert_equal(res.clusters.n_cases, 52)

    # test disconnecting parc
    src = ds['src']
    source = src.source
    parc = source.parc
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    assert_true(len(disc_conn) < len(orig_conn))
    for pair in orig_conn:
        s, d = pair
        if pair in disc_conn:
            assert_equal(parc[s], parc[d])
        else:
            assert_not_equal(parc[s], parc[d])

    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10,
                           parc='source')
    assert_equal(res._cdist.dist.shape[1], len(srcl.source.parc.cells))
    label = 'superiortemporal-lh'
    c_all = res._clusters(maps=True)
    c_label = res._clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    for case in c_label.itercases():
        id_ = case['id']
        idx = c_all['id'].index(id_)[0]
        assert_equal(case['v'], c_all[idx, 'v'])
        assert_equal(case['tstart'], c_all[idx, 'tstart'])
        assert_equal(case['tstop'], c_all[idx, 'tstop'])
        assert_less_equal(case['p'], c_all[idx, 'p'])
        assert_dataobj_equal(case['cluster'], c_all[idx, 'cluster'].sub(source=label))

    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05,
                           parc='source')
    cl = res._clusters(0.05)
    assert_equal(cl.eval("p.min()"), res.p.min())
    mp = res.masked_parameter_map()
    assert_in(mp.min(), (0, res.t.min()))
    assert_in(mp.max(), (0, res.t.max()))

    # indexing source space
    s_sub = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    s_idx = src[idx]
    assert_dataobj_equal(s_sub, s_idx)
def test_source_estimate():
    "Test SourceSpace dimension"
    mne.set_log_level('warning')
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')

    # test auto-conversion
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])

    # source space clustering
    res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10)
    assert res.clusters.n_cases == 52

    # test disconnecting parc
    src = ds['src']
    source = src.source
    parc = source.parc
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    assert len(disc_conn) < len(orig_conn)
    for pair in orig_conn:
        s, d = pair
        if pair in disc_conn:
            assert parc[s] == parc[d]
        else:
            assert parc[s] != parc[d]

    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10,
                           parc='source')
    assert res._cdist.dist.shape[1] == len(srcl.source.parc.cells)
    label = 'superiortemporal-lh'
    c_all = res.find_clusters(maps=True)
    c_label = res.find_clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    for case in c_label.itercases():
        id_ = case['id']
        idx = c_all['id'].index(id_)[0]
        assert case['v'] == c_all[idx, 'v']
        assert case['tstart'] == c_all[idx, 'tstart']
        assert case['tstop'] == c_all[idx, 'tstop']
        assert case['p'] <= c_all[idx, 'p']
        assert_dataobj_equal(case['cluster'], c_all[idx, 'cluster'].sub(source=label))

    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05,
                           parc='source')
    cl = res.find_clusters(0.05)
    assert cl.eval("p.min()") == res.p.min()
    mp = res.masked_parameter_map()
    assert mp.min() == res.t.min()
    assert mp.max() == res.t.max(res.p <= 0.05)
    assert mp.max() == pytest.approx(-4.95817732)

    # indexing source space
    s_sub = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    s_idx = src[idx]
    assert_dataobj_equal(s_sub, s_idx)

    # concatenate
    src_reconc = concatenate((src.sub(source='lh'), src.sub(source='rh')), 'source')
    assert_dataobj_equal(src_reconc, src)