Example #1
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Basic combine")
    del ds1['Y']
    del ds2['YCat']
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Combine with "
                       "missing Var")
    ok_(np.all(ds1['YCat'] == ds['YCat'][:ds1.n_cases]),
        "Combine with missing "
        "Factor")

    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))

    # combine NDVar with unequal dimensions
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((y1, ))
    ds2 = Dataset((y2, ))
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    eq_(y.sensor.names, ['1', '2', '3'], "Sensor dimension "
        "intersection failed.")
    dims = ('case', 'sensor', 'time')
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
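Note: the snippets in this listing are excerpts from the Eelbrain test suite and are shown without their module-level imports. The block below is a rough sketch of the imports they rely on, assuming a recent Eelbrain release; it is not verified against any specific version, and exact names and module paths may differ.

# Sketch of the imports assumed by the snippets in this listing (an
# approximation; adjust module paths to your installed Eelbrain version).
import pickle
import warnings
from itertools import product

import numpy as np
import pytest
import scipy.stats
from numpy.testing import (
    assert_allclose, assert_array_almost_equal, assert_array_equal)
from pytest import approx

from eelbrain import (
    Dataset, Factor, NDVar, Var, combine, concatenate, datasets, load, plot,
    table, test, testnd)
# Further names used by individual snippets (boosting, convolve,
# epoch_impulse_predictor, correlation_coefficient, cwt_morlet, psd_welch,
# set_tmin, LM, LMGroup, glm, stats, permute_order, assub, isvar, isndvar,
# ZeroVariance, _MergedTemporalClusterDist, ...) live in the eelbrain
# namespace or its private submodules, depending on the version. Older
# snippets additionally use nose.tools helpers (eq_, ok_, assert_equal,
# assert_raises, assert_in, assert_less, assert_almost_equal) and testing
# helpers such as assert_dataobj_equal, assert_dataset_equal and file_path.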
Example #2
def test_random_lm():
    # dummy coding
    ds = datasets.get_uts()
    lms = []
    for i in range(5):
        ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
        lms.append(LM('uts', 'A*B*Y', ds))
    rlm = LMGroup(lms)
    eq_(repr(rlm),
        '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')

    # coefficients
    ds = rlm.coefficients_dataset(('A', 'A x B'))
    eq_(ds['term'].cells, ('A', 'A x B'))

    # tests
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    eq_(res.clusters.n_cases, 1)

    # effect coding
    ds = datasets.get_uts()
    lms = []
    for i in range(5):
        ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
        lms.append(LM('uts', 'A*B*Y', ds, 'effect'))
    rlm = LMGroup(lms)
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    eq_(res.clusters.n_cases, 6)

    # persistence
    rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))
    eq_(rlm_p.dims, rlm.dims)
Example #3
def test_random_lm():
    # dummy coding
    ds = datasets.get_uts()
    lms = []
    for i in range(5):
        ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
        lms.append(LM('uts', 'A*B*Y', ds))
    rlm = LMGroup(lms)
    eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')

    # coefficients
    ds = rlm.coefficients_dataset(('A', 'A x B'))
    eq_(ds['term'].cells, ('A', 'A x B'))

    # tests
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    eq_(res.clusters.n_cases, 1)

    # effect coding
    ds = datasets.get_uts()
    lms = []
    for i in range(5):
        ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
        lms.append(LM('uts', 'A*B*Y', ds, 'effect'))
    rlm = LMGroup(lms)
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    eq_(res.clusters.n_cases, 6)

    # persistence
    rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))
    eq_(rlm_p.dims, rlm.dims)
Example #4
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Basic combine")
    del ds1['Y']
    del ds2['YCat']
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Combine with "
                       "missing Var")
    assert_true(np.all(ds1['YCat'] == ds['YCat'][:ds1.n_cases]), "Combine "
                "with missing Factor")

    # combine NDVar with unequal dimensions
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset(y1)
    ds2 = Dataset(y2)
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    assert_equal(y.sensor.names, ['1', '2', '3'], "Sensor dimension "
                 "intersection failed.")
    dims = ('case', 'sensor', 'time')
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
Example #5
def test_merged_temporal_cluster_dist():
    "Test use of _MergedTemporalClusterDist with testnd test results"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts(seed=42)

    anova_kw = dict(Y='uts', X='A*B*rm', pmin=0.05, samples=10)
    ttest_kw = dict(Y='uts', X='A', c1='a1', c0='a0', pmin=0.05, samples=10)
    contrast_kw = dict(Y='uts', X='A', contrast='a1>a0', pmin=0.05, samples=10)

    def test_merged(res1, res2):
        merged_dist = _MergedTemporalClusterDist([res1._cdist, res2._cdist])
        if isinstance(res1, testnd.anova):
            assert_equal(len(merged_dist.dist), len(res1.effects))
            for effect, dist in merged_dist.dist.items():
                assert_in(effect, res1.effects)
                assert_equal(len(dist), res1.samples)
        else:
            assert_equal(len(merged_dist.dist), res1.samples)
        res1_clusters = merged_dist.correct_cluster_p(res1)
        res2_clusters = merged_dist.correct_cluster_p(res2)
        for clusters in [res1_clusters, res2_clusters]:
            assert_in('p_parc', clusters)
            for cl in clusters.itercases():
                assert_greater_equal(cl['p_parc'], cl['p'])

    # multi-effect
    res1 = testnd.anova(ds=ds1, **anova_kw)
    res2 = testnd.anova(ds=ds2, **anova_kw)
    test_merged(res1, res2)

    # ttest_rel
    res1 = testnd.ttest_rel(ds=ds1, match='rm', **ttest_kw)
    res2 = testnd.ttest_rel(ds=ds2, match='rm', **ttest_kw)
    test_merged(res1, res2)

    # ttest_ind
    res1 = testnd.ttest_ind(ds=ds1, **ttest_kw)
    res2 = testnd.ttest_ind(ds=ds2, **ttest_kw)
    test_merged(res1, res2)

    # ttest_1samp
    res1 = testnd.ttest_1samp('uts', ds=ds1, pmin=0.05, samples=10)
    res2 = testnd.ttest_1samp('uts', ds=ds2, pmin=0.05, samples=10)
    test_merged(res1, res2)

    # t_contrast_rel
    res1 = testnd.t_contrast_rel(ds=ds1, match='rm', **contrast_kw)
    res2 = testnd.t_contrast_rel(ds=ds2, match='rm', **contrast_kw)
    test_merged(res1, res2)
Example #7
def test_plot_butterfly():
    "Test plot.Butterfly"
    ds = datasets.get_uts(utsnd=True)
    p = plot.Butterfly('utsnd', ds=ds, show=False)
    p.close()
    p = plot.Butterfly('utsnd', 'A%B', ds=ds, show=False)
    p.close()
Example #8
def test_corr():
    "Test testnd.corr()"
    ds = datasets.get_uts(True)

    # add correlation
    Y = ds['Y']
    utsnd = ds['utsnd']
    utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]

    res = testnd.corr('utsnd', 'Y', ds=ds)
    assert repr(res) == "<corr 'utsnd', 'Y'>"
    for s, t in product('01234', (0.1, 0.2, 0.35)):
        target = test.Correlation(utsnd.sub(sensor=s, time=t), Y).r
        assert res.r.sub(sensor=s, time=t) == pytest.approx(target)
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, pmin=0.05)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, tfce=True)
    repr(res)

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    assert_dataobj_equal(res.p, res_.p)
Example #9
def test_t_contrast_testnd():
    ds = datasets.get_uts()

    # binary function
    res = testnd.t_contrast_rel('uts',
                                'A',
                                "a1>a0 - a0>a1",
                                'rm',
                                ds=ds,
                                tmin=4,
                                samples=10)
    assert_equal(res.find_clusters()['p'],
                 np.array([1, 1, 0.9, 0, 0.2, 1, 1, 0]))
    res_t = testnd.ttest_rel('uts',
                             'A',
                             'a1',
                             'a0',
                             match='rm',
                             ds=ds,
                             tmin=2,
                             samples=10)
    assert_array_equal(res.t.x, res_t.t.x * 2)
    assert_array_equal(res.clusters['tstart'], res_t.clusters['tstart'])
    assert_array_equal(res.clusters['tstop'], res_t.clusters['tstop'])
    assert_array_equal(res.clusters['v'], res_t.clusters['v'] * 2)
Example #10
def test_clusters():
    "test plot.uts cluster plotting functions"
    ds = datasets.get_uts()

    A = ds['A']
    B = ds['B']
    Y = ds['uts']

    # fixed effects model
    res = testnd.anova(Y, A * B)
    p = plot.UTSClusters(res, title="Fixed Effects Model", show=False)
    p.close()

    # random effects model:
    subject = Factor(range(15), tile=4, random=True, name='subject')
    res = testnd.anova(Y, A * B * subject, samples=2)
    p = plot.UTSClusters(res, title="Random Effects Model", show=False)
    p.close()

    # plot UTSStat
    p = plot.UTSStat(Y, A % B, match=subject, show=False)
    p.set_clusters(res.clusters)
    p.close()
    p = plot.UTSStat(Y, A, Xax=B, match=subject, show=False)
    p.close()
Example #11
def test_corr():
    "Test stats.corr"
    ds = datasets.get_uts()
    y = ds.eval("uts.x[:,:3]")
    x = ds.eval('Y.x')
    n_cases = len(y)
    df = n_cases - 2

    corr = stats.corr(y, x)
    p = stats.rtest_p(corr, df)
    for i in range(len(corr)):
        r_sp, p_sp = scipy.stats.pearsonr(y[:, i], x)
        assert corr[i] == pytest.approx(r_sp)
        assert p[i] == pytest.approx(p_sp)

    # NaN
    with warnings.catch_warnings():  # divide by 0
        warnings.simplefilter("ignore")
        assert stats.corr(np.arange(10), np.zeros(10)) == 0

    # perm
    y_perm = np.empty_like(y)
    for perm in permute_order(n_cases, 2):
        y_perm[perm] = y
        stats.corr(y, x, corr, perm)
        for i in range(len(corr)):
            r_sp, _ = scipy.stats.pearsonr(y_perm[:, i], x)
            assert corr[i] == pytest.approx(r_sp)
Example #12
def test_t_contrast():
    ds = datasets.get_uts()

    # simple contrast
    res = testnd.t_contrast_rel('uts', 'A', 'a1>a0', 'rm', ds=ds, samples=10,
                                pmin=0.05)
    repr(res)
    res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_array_equal(res.t.x, res_.t.x)
    assert_in('samples', repr(res))

    # complex contrast
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a0|b0>a1|b0, a0|b1>a1|b1)',
                                'rm', ds=ds, samples=10, pmin=0.05)
    res_b0 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b0'), ('a1', 'b0'), 'rm',
                              ds=ds)
    res_b1 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b1'), ('a1', 'b1'), 'rm',
                              ds=ds)
    assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p, res_.p)

    # contrast with "*"
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)',
                                'rm', ds=ds, tail=1)
Example #13
def test_aggregate():
    "Test aggregation methods"
    ds = datasets.get_uts()

    # don't handle inconsistencies silently
    assert_raises(ValueError, ds.aggregate, 'A%B')

    dsa = ds.aggregate('A%B', drop_bad=True)
    assert_array_equal(dsa['n'], [15, 15, 15, 15])
    idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
    eq_(dsa['Y', 0], ds['Y', idx1].mean())

    # unequal cell counts
    ds = ds[:-3]
    dsa = ds.aggregate('A%B', drop_bad=True)
    assert_array_equal(dsa['n'], [15, 15, 15, 12])
    idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
    eq_(dsa['Y', 0], ds['Y', idx1].mean())

    # equalize count
    dsa = ds.aggregate('A%B', drop_bad=True, equal_count=True)
    assert_array_equal(dsa['n'], [12, 12, 12, 12])
    idx1_12 = np.logical_and(idx1, idx1.cumsum() <= 12)
    eq_(dsa['Y', 0], ds['Y', idx1_12].mean())

    # equalize count with empty cell
    sds = ds.sub("logical_or(A == 'a1', B == 'b1')")
    dsa = sds.aggregate('A%B', drop_bad=True, equal_count=True)
    assert_array_equal(dsa['n'], [12, 12, 12])
Example #14
def test_ndvar_timeseries_methods():
    "Test NDVar time-series methods"
    ds = datasets.get_uts(True)
    x = ds['utsnd']
    xs = NDVar(x.x.swapaxes(1, 2), ('case', x.dims[2], x.dims[1]),
               x.info.copy(), x.name)

    # envelope
    env = x.envelope()
    assert_array_equal(env.x >= 0, True)
    envs = xs.envelope()
    assert_array_equal(env.x, envs.x.swapaxes(1,2))

    # indexing
    eq_(len(ds[0, 'uts'][-10:-1].time), 9)

    # FFT
    x = ds['uts'].mean('case')
    np.sin(2 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 1) * (len(f) - 1))
    np.sin(4 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 2) * (len(f) - 1))

    # update tmin
    eq_(x.time.times[0], -0.2)
    x.time.set_tmin(3.2)
    eq_(x.time.times[0], 3.2)
Example #15
def test_uts():
    "test plot.UTS plotting function"
    ds = datasets.get_uts()
    p = plot.UTS('uts', ds=ds, show=False)
    p.close()
    p = plot.UTS('uts', 'A%B', ds=ds, show=False)
    p.close()
Example #16
def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar('uts', ds=ds)
    ok_('time' in lds)
    assert_is_instance(lds['time'], Var)
    eq_(set(lds['time'].x), set(ds['uts'].time.x))

    # no ds
    lds2 = table.melt_ndvar(ds['uts'])
    assert_dataobj_equal(lds2['uts'], lds['uts'])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))",
                           ds=ds,
                           varname='summary')
    eq_(set(lds['sensor'].cells), set(ds['utsnd'].sensor.names))

    # NDVar out
    lds = table.melt_ndvar("utsnd", 'sensor', ds=ds)
    ok_('utsnd' in lds)
    assert_is_instance(lds['utsnd'], NDVar)
    assert_dataobj_equal(lds[:ds.n_cases, 'utsnd'],
                         ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    assert_raises(ValueError, table.melt_ndvar, 'utsnd', ds=ds)
Example #17
def test_corr():
    "Test stats.corr"
    ds = datasets.get_uts()
    y = ds.eval("uts.x[:,:3]")
    x = ds.eval('Y.x')
    n_cases = len(y)
    df = n_cases - 2

    corr = stats.corr(y, x)
    p = stats.rtest_p(corr, df)
    for i in range(len(corr)):
        r_sp, p_sp = scipy.stats.pearsonr(y[:, i], x)
        assert_almost_equal(corr[i], r_sp)
        assert_almost_equal(p[i], p_sp)

    # NaN
    r = stats.corr(np.arange(10), np.zeros(10))
    eq_(r, 0)

    # perm
    y_perm = np.empty_like(y)
    for perm in permute_order(n_cases, 2):
        y_perm[perm] = y
        stats.corr(y, x, corr, perm)
        for i in range(len(corr)):
            r_sp, _ = scipy.stats.pearsonr(y_perm[:, i], x)
            assert_almost_equal(corr[i], r_sp)
Example #18
def test_time_slicer():
    "Test linked time axes"
    ds = datasets.get_uts(True)

    p1 = plot.Butterfly(ds['utsnd'])
    p2 = plot.Array('utsnd', 'A', ds=ds)
    p1.link_time_axis(p2)

    p1._set_time(.1, True)
    assert p2._current_time == .1
    assert p2._time_fixed == True
    p2._set_time(.2)
    assert p1._current_time == .2
    assert p1._time_fixed == False

    p1 = plot.TopoButterfly(ds['utsnd'])
    p2 = plot.Array('utsnd', 'A', ds=ds)
    p2.link_time_axis(p1)

    p1._set_time(.1, True)
    assert p2._current_time == .1
    assert p2._time_fixed == True

    # merge another
    p3 = plot.TopoButterfly(ds[0, 'utsnd'])
    p3.link_time_axis(p2)

    p2._set_time(.2)
    assert p1._current_time == .2
    assert p1._time_fixed == False
Example #19
def test_corr():
    "Test testnd.corr()"
    ds = datasets.get_uts(True)

    # add correlation
    Y = ds['Y']
    utsnd = ds['utsnd']
    utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]

    res = testnd.corr('utsnd', 'Y', ds=ds)
    repr(res)
    for s, t in product('01234', (0.1, 0.2, 0.35)):
        target = test.Correlation(utsnd.sub(sensor=s, time=t), Y).r
        assert_almost_equal(res.r.sub(sensor=s, time=t), target, 10)
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, pmin=0.05)
    repr(res)
    res = testnd.corr('utsnd', 'Y', ds=ds, samples=10, tfce=True)
    repr(res)

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    assert_dataobj_equal(res.p, res_.p)
Example #20
def test_t_contrast():
    ds = datasets.get_uts()

    # simple contrast
    res = testnd.t_contrast_rel('uts',
                                'A',
                                'a1>a0',
                                'rm',
                                ds=ds,
                                samples=10,
                                pmin=0.05)
    assert repr(
        res
    ) == "<t_contrast_rel 'uts', 'A', 'a1>a0', match='rm', samples=10, pmin=0.05, 7 clusters, p < .001>"
    res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_array_equal(res.t.x, res_.t.x)

    # complex contrast
    res = testnd.t_contrast_rel('uts',
                                'A%B',
                                'min(a0|b0>a1|b0, a0|b1>a1|b1)',
                                'rm',
                                ds=ds,
                                samples=10,
                                pmin=0.05)
    res_b0 = testnd.ttest_rel('uts',
                              'A%B', ('a0', 'b0'), ('a1', 'b0'),
                              'rm',
                              ds=ds)
    res_b1 = testnd.ttest_rel('uts',
                              'A%B', ('a0', 'b1'), ('a1', 'b1'),
                              'rm',
                              ds=ds)
    assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p, res_.p)

    # contrast with "*"
    res = testnd.t_contrast_rel('uts',
                                'A%B',
                                'min(a1|b0>a0|b0, a1|b1>a0|b1)',
                                'rm',
                                ds=ds,
                                tail=1,
                                samples=0)

    # zero variance
    ds['uts'].x[:, 10] = 0.
    with pytest.raises(ZeroVariance):
        testnd.t_contrast_rel('uts',
                              'A%B',
                              'min(a1|b0>a0|b0, a1|b1>a0|b1)',
                              'rm',
                              tail=1,
                              ds=ds,
                              samples=0)
Example #21
def test_plot_array():
    "Test plot.Array"
    ds = datasets.get_uts(utsnd=True)
    p = plot.Array('utsnd', 'A%B', ds=ds, show=False)
    p.close()
    p = plot.Array('utsnd', ds=ds, show=False)
    p.close()
Example #22
def test_concatenate():
    """Test concatenate()

    Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
    """
    ds = datasets.get_uts(True)

    v0 = ds[0, 'utsnd']
    v1 = ds[1, 'utsnd']
    vc = concatenate((v1, v0))
    assert_array_equal(vc.sub(time=(0, 1)).x, v1.x)
    assert_array_equal(vc.sub(time=(1, 2)).x, v0.x)
    assert_array_equal(vc.info, ds['utsnd'].info)

    # scalar
    psd = psd_welch(ds['utsnd'], n_fft=100)
    v0 = psd.sub(frequency=(None, 5))
    v1 = psd.sub(frequency=(45, None))
    conc = concatenate((v0, v1), 'frequency')
    assert_array_equal(conc.frequency.values[:5], psd.frequency.values[:5])
    assert_array_equal(conc.frequency.values[5:], psd.frequency.values[45:])
    conc_data = conc.get_data(v1.dimnames)
    assert_array_equal(conc_data[:, :, 5:], v1.x)

    # cat
    x = get_ndvar(2, frequency=0, cat=4)
    x_re = concatenate([x.sub(cat=(None, 'c')), x.sub(cat=('c', None))], 'cat')
    assert_dataobj_equal(x_re, x)
Example #23
def test_boosting_epochs():
    """Test boosting with epoched data"""
    ds = datasets.get_uts(True, vector3d=True)
    p1 = epoch_impulse_predictor('uts', 'A=="a1"', name='a1', ds=ds)
    p0 = epoch_impulse_predictor('uts', 'A=="a0"', name='a0', ds=ds)
    p1 = p1.smooth('time', .05, 'hamming')
    p0 = p0.smooth('time', .05, 'hamming')
    # 1d
    for tstart, basis in product((-0.1, 0.1, 0), (0, 0.05)):
        print(f"tstart={tstart}, basis={basis}")
        res = boosting('uts', [p0, p1], tstart, 0.6, model='A', ds=ds, basis=basis, partitions=10, debug=True)
        y = convolve(res.h_scaled, [p0, p1])
        assert correlation_coefficient(y, res.y_pred) > .999
        r = correlation_coefficient(y, ds['uts'])
        assert res.r == approx(r, abs=1e-3)
        assert res.partitions == 10
    # 2d
    res = boosting('utsnd', [p0, p1], 0, 0.6, model='A', ds=ds, partitions=10)
    assert len(res.h) == 2
    assert res.h[0].shape == (5, 60)
    assert res.h[1].shape == (5, 60)
    y = convolve(res.h_scaled, [p0, p1])
    r = correlation_coefficient(y, ds['utsnd'], ('case', 'time'))
    assert_dataobj_equal(res.r, r, decimal=3, name=False)
    # vector
    res = boosting('v3d', [p0, p1], 0, 0.6, error='l1', model='A', ds=ds, partitions=10)
    assert res.residual.ndim == 0
Example #24
def test_uts():
    "test plot.UTS plotting function"
    ds = datasets.get_uts()
    x_long = set_tmin(concatenate(ds[:10, 'uts']), -1)

    p = plot.UTS('uts', ds=ds)
    p.close()
    p = plot.UTS('uts', 'A%B', ds=ds)
    p.set_ylim(1)
    p.set_ylim(0, 1)
    assert p.get_ylim() == (0, 1)
    p.set_ylim(1, -1)
    assert p.get_ylim() == (1, -1)
    p.close()

    p = plot.UTS(x_long, h=2, w=5, xlim=2)
    assert p.get_xlim() == (-1, 1)
    p.set_xlim(2, 4)
    assert p.get_xlim() == (2, 4)
    p.close()

    # multiple y with xax
    y1 = ds.eval("uts[(A == 'a1') & (B == 'b1')]")
    y1.name='y'
    y2 = ds.eval("uts[(A == 'a0') & (B == 'b1')]")
    y2.name='y2'
    rm = ds.eval("rm[(A == 'a0') & (B == 'b1')]")
    p = plot.UTS(y1, rm)
    p.close()
    p = plot.UTS([y1, y2], rm)
    p.close()
Example #25
def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar("uts", ds=ds)
    ok_("time" in lds)
    ok_(isvar(lds["time"]))
    eq_(set(lds["time"].x), set(ds["uts"].time.x))

    # no ds
    lds2 = table.melt_ndvar(ds["uts"])
    assert_dataobj_equal(lds2["uts"], lds["uts"])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))", ds=ds, varname="summary")
    eq_(set(lds["sensor"].cells), set(ds["utsnd"].sensor.names))

    # NDVar out
    lds = table.melt_ndvar("utsnd", "sensor", ds=ds)
    ok_("utsnd" in lds)
    ok_(isndvar(lds["utsnd"]))
    assert_dataobj_equal(lds[: ds.n_cases, "utsnd"], ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    assert_raises(ValueError, table.melt_ndvar, "utsnd", ds=ds)
Example #27
def test_plot_results():
    "Test plotting test results"
    ds = datasets.get_uts(True)

    # ANOVA
    res = testnd.anova('utsnd',
                       'A*B*rm',
                       match='rm',
                       ds=ds,
                       samples=0,
                       pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()
    res = testnd.anova('utsnd',
                       'A*B*rm',
                       match='rm',
                       ds=ds,
                       samples=2,
                       pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()

    # Correlation
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    p = plot.Array(res, show=False)
    p.close()
    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds, samples=10, pmin=0.05)
    p = plot.Array(res, show=False)
    p.close()
Example #28
def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar('uts', ds=ds)
    ok_('time' in lds)
    ok_(isvar(lds['time']))
    eq_(set(lds['time'].x), set(ds['uts'].time.x))

    # no ds
    lds2 = table.melt_ndvar(ds['uts'])
    assert_dataobj_equal(lds2['uts'], lds['uts'])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))", ds=ds, varname='summary')
    eq_(set(lds['sensor'].cells), set(ds['utsnd'].sensor.names))

    # NDVar out
    lds = table.melt_ndvar("utsnd", 'sensor', ds=ds)
    ok_('utsnd' in lds)
    ok_(isndvar(lds['utsnd']))
    assert_dataobj_equal(lds[:ds.n_cases, 'utsnd'], ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    assert_raises(ValueError, table.melt_ndvar, 'utsnd', ds=ds)
Example #29
def test_melt_ndvar():
    "Test table.melt_ndvar()"
    ds = datasets.get_uts(True)
    ds = ds.sub("A == 'a1'")

    lds = table.melt_ndvar('uts', ds=ds)
    assert 'time' in lds
    assert isinstance(lds['time'], Var)
    assert_array_equal(np.unique(lds['time'].x), ds['uts'].time)

    # no ds
    lds2 = table.melt_ndvar(ds['uts'])
    assert_dataobj_equal(lds2['uts'], lds['uts'])

    # sensor
    lds = table.melt_ndvar("utsnd.summary(time=(0.1, 0.2))", ds=ds, varname='summary')
    assert set(lds['sensor'].cells) == set(ds['utsnd'].sensor.names)

    # NDVar out
    lds = table.melt_ndvar("utsnd", 'sensor', ds=ds)
    assert 'utsnd' in lds
    assert isinstance(lds['utsnd'], NDVar)
    assert_dataobj_equal(lds[:ds.n_cases, 'utsnd'], ds.eval("utsnd.sub(sensor='0')"))

    # more than one dimension
    with pytest.raises(ValueError):
        table.melt_ndvar('utsnd', ds=ds)
Example #31
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    repr(res)
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd',
                           'A',
                           'a1',
                           'a0',
                           ds=ds,
                           pmin=0.05,
                           samples=2)
    eq_(res._cdist.n_clusters, 10)
Example #32
def test_t_contrast():
    ds = datasets.get_uts()

    # simple contrast
    res = testnd.t_contrast_rel('uts', 'A', 'a1>a0', 'rm', ds=ds, samples=10, pmin=0.05)
    assert repr(res) == "<t_contrast_rel 'uts', 'A', 'a1>a0', match='rm', samples=10, pmin=0.05, 7 clusters, p < .001>"
    res_ = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_array_equal(res.t.x, res_.t.x)

    # complex contrast
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a0|b0>a1|b0, a0|b1>a1|b1)', 'rm', ds=ds, samples=10, pmin=0.05)
    res_b0 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b0'), ('a1', 'b0'), 'rm', ds=ds)
    res_b1 = testnd.ttest_rel('uts', 'A%B', ('a0', 'b1'), ('a1', 'b1'), 'rm', ds=ds)
    assert_array_equal(res.t.x, np.min([res_b0.t.x, res_b1.t.x], axis=0))

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert repr(res_) == repr(res)
    assert_dataobj_equal(res.p, res_.p)

    # contrast with "*"
    res = testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)',
                                'rm', ds=ds, tail=1)

    # zero variance
    ds['uts'].x[:, 10] = 0.
    with pytest.raises(ZeroVariance):
        testnd.t_contrast_rel('uts', 'A%B', 'min(a1|b0>a0|b0, a1|b1>a0|b1)', 'rm', tail=1, ds=ds)
Example #33
def test_pickle():
    ds = datasets.get_uts()

    ds_2 = load.unpickle(file_path('uts-py2.pickle'))
    assert_dataobj_equal(ds_2, ds)
    ds_3 = load.unpickle(file_path('uts-py3.pickle'))
    assert_dataobj_equal(ds_3, ds)
Example #35
def test_corr():
    "Test stats.corr"
    ds = datasets.get_uts()
    y = ds.eval("uts.x[:,:3]")
    x = ds.eval('Y.x')
    n_cases = len(y)
    df = n_cases - 2

    corr = stats.corr(y, x)
    p = stats.rtest_p(corr, df)
    for i in range(len(corr)):
        r_sp, p_sp = scipy.stats.pearsonr(y[:, i], x)
        assert_almost_equal(corr[i], r_sp)
        assert_almost_equal(p[i], p_sp)

    # NaN
    with warnings.catch_warnings():  # divide by 0
        warnings.simplefilter("ignore")
        eq_(stats.corr(np.arange(10), np.zeros(10)), 0)

    # perm
    y_perm = np.empty_like(y)
    for perm in permute_order(n_cases, 2):
        y_perm[perm] = y
        stats.corr(y, x, corr, perm)
        for i in range(len(corr)):
            r_sp, _ = scipy.stats.pearsonr(y_perm[:, i], x)
            assert_almost_equal(corr[i], r_sp)
Example #37
def test_time_slicer():
    "Test linked time axes"
    ds = datasets.get_uts(True)

    p1 = plot.Butterfly(ds['utsnd'], show=False)
    p2 = plot.Array('utsnd', 'A', ds=ds, show=False)
    p1.link_time_axis(p2)

    p1._set_time(.1, True)
    eq_(p2._current_time, .1)
    eq_(p2._time_fixed, True)
    p2._set_time(.2)
    eq_(p1._current_time, .2)
    eq_(p1._time_fixed, False)

    p1 = plot.TopoButterfly(ds['utsnd'], show=False)
    p2 = plot.Array('utsnd', 'A', ds=ds, show=False)
    p2.link_time_axis(p1)

    p1._set_time(.1, True)
    eq_(p2._current_time, .1)
    eq_(p2._time_fixed, True)
    p2._set_time(.2)
    eq_(p1._current_time, .2)
    eq_(p1._time_fixed, False)
Example #38
def test_corr():
    "Test testnd.corr()"
    plot.configure_backend(False, False)
    ds = datasets.get_uts(True)

    # add correlation
    Y = ds['Y']
    utsnd = ds['utsnd']
    utsnd.x.shape
    utsnd.x[:, 3:5, 50:65] += Y.x[:, None, None]

    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds)
    repr(res)
    p = plot.Array(res)
    p.close()

    res = testnd.corr('utsnd', 'Y', 'rm', ds=ds, samples=10, pmin=0.05)
    p = plot.Array(res)
    p.close()

    # persistence
    string = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)
    assert_dataobj_equal(res.p, res_.p)

    # NaN
    r = _testnd._corr(np.arange(10), np.zeros(10))
    assert_equal(r, 0)
Example #39
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray('utsnd', ds=ds)
    assert repr(p) == "<TopoArray: utsnd>"
    p.set_topo_t(0, 0.2)
    p.close()
    p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2)
    p.close()
    p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4)
    assert repr(p) == "<TopoArray: utsnd ~ A x B>"
    p.close()

    # results
    res = testnd.ttest_ind('utsnd',
                           'A',
                           ds=ds,
                           pmin=0.05,
                           tstart=0.1,
                           tstop=0.3,
                           samples=2)
    p = plot.TopoArray(res)
    assert repr(p) == "<TopoArray: a0, a1, a0 - a1>"
    p.set_topo_t(0, 0.)
    p.close()
Example #40
def test_lmfitter():
    "Test the _nd_anova class"
    ds = datasets.get_uts()

    # independent, residuals vs. Hopkins
    y = ds['uts'].x

    x = ds.eval("A * B")
    lm = glm._nd_anova(x)
    f_maps = lm.map(y)
    p_maps = lm.p_maps(f_maps)

    x_full = ds.eval("A * B + ind(A%B)")
    lm_full = glm._nd_anova(x_full)
    f_maps_full = lm_full.map(y)
    p_maps_full = lm_full.p_maps(f_maps)

    for f, f_full in zip(f_maps, f_maps_full):
        assert_allclose(f, f_full)
    for p, p_full in zip(p_maps, p_maps_full):
        assert_allclose(p, p_full)

    # repeated measures
    x = ds.eval("A * B * rm")
    lm = glm._nd_anova(x)
    f_maps = lm.map(y)
    p_maps = lm.p_maps(f_maps)

    aov = test.ANOVA(y[:, 0], x)
    for f_test, f_map, p_map in zip(aov.f_tests, f_maps, p_maps):
        assert_almost_equal(f_map[0], f_test.F)
        assert_almost_equal(p_map[0], f_test.p)
Example #42
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    eq_(repr(res), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    eq_(repr(res_), "<ttest_ind 'uts', 'A', 'a1' (n=30), 'a0' (n=30)>")
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd',
                           'A',
                           'a1',
                           'a0',
                           ds=ds,
                           pmin=0.05,
                           samples=2)
    eq_(res._cdist.n_clusters, 10)

    # zero variance
    ds['utsnd'].x[:, 1, 10] = 0.
    assert_raises(ZeroVariance, testnd.ttest_ind, 'utsnd', 'A', ds=ds)
Example #43
def test_ttest_ind():
    "Test testnd.ttest_ind()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds)
    repr(res)
    assert_less(res.p_uncorrected.min(), 0.05)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # cluster
    res = testnd.ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, tail=1, samples=1)
    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # nd
    res = testnd.ttest_ind('utsnd', 'A', 'a1', 'a0', ds=ds, pmin=0.05, samples=2)
    eq_(res._cdist.n_clusters, 10)
Example #45
def test_lmfitter():
    "Test the _nd_anova class"
    ds = datasets.get_uts()

    # independent, residuals vs. Hopkins
    y = ds['uts'].x

    x = ds.eval("A * B")
    lm = glm._nd_anova(x)
    f_maps = lm.map(y)
    p_maps = lm.p_maps(f_maps)

    x_full = ds.eval("A * B + ind(A%B)")
    lm_full = glm._nd_anova(x_full)
    assert isinstance(lm_full, glm._BalancedMixedNDANOVA)
    f_maps_full = lm_full.map(y)
    p_maps_full = lm_full.p_maps(f_maps)

    for f, f_full in zip(f_maps, f_maps_full):
        assert_allclose(f, f_full)
    for p, p_full in zip(p_maps, p_maps_full):
        assert_allclose(p, p_full)

    # repeated measures
    x = ds.eval("A * B * rm")
    lm = glm._nd_anova(x)
    f_maps = lm.map(y)
    p_maps = lm.p_maps(f_maps)

    aov = test.ANOVA(y[:, 0], x)
    for f_test, f_map, p_map in zip(aov.f_tests, f_maps, p_maps):
        assert f_map[0] == pytest.approx(f_test.F)
        assert p_map[0] == pytest.approx(f_test.p)
Example #47
def test_ndvar_timeseries_methods():
    "Test NDVar time-series methods"
    ds = datasets.get_uts(True)
    x = ds['utsnd']
    xs = NDVar(x.x.swapaxes(1, 2), ('case', x.dims[2], x.dims[1]),
               x.info.copy(), x.name)

    # envelope
    env = x.envelope()
    assert_array_equal(env.x >= 0, True)
    envs = xs.envelope()
    assert_array_equal(env.x, envs.x.swapaxes(1,2))

    # indexing
    eq_(len(ds[0, 'uts'][0.01:0.1].time), 9)

    # FFT
    x = ds['uts'].mean('case')
    np.sin(2 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 1) * (len(f) - 1))
    np.sin(4 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 2) * (len(f) - 1))

    # update tmin
    eq_(x.time.times[0], -0.2)
    x.time.set_tmin(3.2)
    eq_(x.time.times[0], 3.2)
Example #48
def test_random_lm():
    np.random.seed(0)

    ds = datasets.get_uts()
    lms_dummy = []
    lms_effect = []
    for i in range(5):
        ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
        lms_dummy.append(LM('uts', 'A*B*Y', ds))
        lms_effect.append(LM('uts', 'A*B*Y', ds, 'effect'))

    # dummy coding
    rlm = LMGroup(lms_dummy)
    assert repr(
        rlm
    ) == '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>'
    # coefficients
    ds = rlm.coefficients_dataset(('A', 'A x B'))
    assert ds['term'].cells == ('A', 'A x B')
    # tests
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    assert res.clusters.n_cases == 2

    # effect coding
    rlm = LMGroup(lms_effect)
    res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
    assert res.clusters.n_cases == 5
    # persistence
    rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))
    assert rlm_p.dims == rlm.dims
Example #50
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                           ds=ds, samples=100)
    eq_(repr(res), "<ttest_rel 'uts', 'A x B', ('a1', 'b1'), ('a0', 'b0'), "
                   "'rm' (n=15), samples=100, p >= 0.000>")

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)

    # reproducibility
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    testnd.configure(0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    testnd.configure(-1)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    testnd.configure(0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
Example #51
def test_combine():
    "Test combine()"
    ds1 = datasets.get_uts()
    ds2 = datasets.get_uts()
    n = ds1.n_cases
    ds = combine((ds1, ds2))
    assert_array_equal(ds2['Y'].x, ds['Y'].x[n:])

    # combine Datasets with unequal keys
    del ds1['Y']
    # raise
    assert_raises(KeyError, combine, (ds1, ds2))
    assert_raises(KeyError, combine, (ds2, ds1))
    # drop
    del ds2['YCat']
    ds = combine((ds1, ds2), incomplete='drop')
    ok_('Y' not in ds)
    ok_('YCat' not in ds)
    # fill in
    ds = combine((ds1, ds2), incomplete='fill in')
    assert_array_equal(ds['Y'].x[n:], ds2['Y'].x)
    assert_array_equal(np.isnan(ds['Y'].x[:n]), True)
    assert_array_equal(ds['YCat'][:n], ds1['YCat'])
    assert_array_equal(ds['YCat'][n:], '')

    # invalid input
    assert_raises(ValueError, combine, ())
    assert_raises(TypeError, combine, (ds2['A'], ds2['Y']))

    # combine NDVar with unequal dimensions
    ds = datasets.get_uts(utsnd=True)
    y = ds['utsnd']
    y1 = y.sub(sensor=['0', '1', '2', '3'])
    y2 = y.sub(sensor=['1', '2', '3', '4'])
    ds1 = Dataset((y1,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    ds2 = Dataset((y2,), info={'a': np.arange(2), 'b': [np.arange(2)]})
    dsc = combine((ds1, ds2))
    y = dsc['utsnd']
    eq_(list(y.sensor.names), ['1', '2', '3'], "Sensor dimension intersection")
    dims = ('case', 'sensor', 'time')
    ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
    assert_array_equal(y.get_data(dims), ref, "combine utsnd")
    # info
    assert_array_equal(dsc.info['a'], np.arange(2))
    eq_(len(dsc.info['b']), 1)
    assert_array_equal(dsc.info['b'][0], np.arange(2))
Example #52
def test_cwt():
    "Test tests with wavelet transform"
    ds = datasets.get_uts(True)
    ds['cwt'] = cwt_morlet(ds['utsnd'], np.arange(10, 20))
    res = testnd.ttest_rel('cwt', 'A', match='rm', ds=ds, pmin=0.05, samples=10)
    cluster = res.clusters.sub("p == 0")
    assert_array_equal(cluster['frequency_min'], 10)
    assert_array_equal(cluster['frequency_max'], 19)
Example #53
def test_pickle():
    ds = datasets.get_uts()

    decimal = None if IS_OSX else 15
    ds_2 = load.unpickle(file_path('uts-py2.pickle'))
    assert_dataobj_equal(ds_2, ds, decimal)
    ds_3 = load.unpickle(file_path('uts-py3.pickle'))
    assert_dataobj_equal(ds_3, ds, decimal)
Example #54
def test_coercion():
    "Test data class coercion"
    ds = datasets.get_uts()
    ds['avar'] = Var.from_dict(ds['A'], {'a0': 0, 'a1': 1})

    assert_array_equal(assub("A == 'a0'", ds), ds['A'] == 'a0')
    assert_array_equal(assub("avar == 0", ds), ds['avar'] == 0)
    assert_raises(TypeError, assub, "avar == '0'", ds)
Example #56
def test_lm():
    ds = datasets.get_uts()
    model = ds.eval("A*B*Y")
    coeffs = ds['uts'].ols(model)

    lm = LM('uts', 'A*B*Y', ds, 'effect')
    for i, effect in enumerate(model.effects):
        assert_array_equal(lm._coefficient(effect.name), coeffs.x[i: i+1])
Example #57
def test_ttest_rel():
    "Test testnd.ttest_rel()"
    ds = datasets.get_uts(True)

    # basic
    res = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                           ds=ds, samples=100)
    repr(res)

    # persistence
    string = pickle.dumps(res, pickle.HIGHEST_PROTOCOL)
    res_ = pickle.loads(string)
    repr(res_)
    assert_equal(repr(res_), repr(res))
    assert_dataobj_equal(res.p_uncorrected, res_.p_uncorrected)

    # collapsing cells
    res2 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=ds)
    assert_less(res2.p_uncorrected.min(), 0.05)
    assert_equal(res2.n, res.n)

    # reproducibility
    res3 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res3.find_clusters(maps=True), res.clusters)
    testnd.configure(0)
    res4 = testnd.ttest_rel('uts', 'A%B', ('a1', 'b1'), ('a0', 'b0'), 'rm',
                            ds=ds, samples=100)
    assert_dataset_equal(res4.find_clusters(maps=True), res.clusters)
    testnd.configure(-1)
    sds = ds.sub("B=='b0'")
    # thresholded, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # thresholded, UTSND
    testnd.configure(0)
    res0 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    tgt = res0.find_clusters()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('utsnd', 'A', 'a1', 'a0', 'rm', ds=sds, pmin=0.1,
                            samples=100)
    assert_dataset_equal(res1.find_clusters(), tgt)
    # TFCE, UTS
    testnd.configure(0)
    res0 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    tgt = res0.compute_probability_map()
    testnd.configure(-1)
    res1 = testnd.ttest_rel('uts', 'A', 'a1', 'a0', 'rm', ds=sds, tfce=True,
                            samples=10)
    assert_dataobj_equal(res1.compute_probability_map(), tgt)
Example #58
def test_histogram():
    "Test plot.Histogram"
    ds = datasets.get_uts()
    plot.Histogram('Y', 'A%B', ds=ds)
    plot.Histogram('Y', 'A%B', match='rm', ds=ds)
    plot.Histogram('Y', 'A%B', match='rm', ds=ds, density=True)
    plot.Histogram('Y', 'A%B', ds=ds, test=True)
    plot.Histogram('Y', 'A%B', match='rm', ds=ds, test=True)
    plot.Histogram('Y', 'A%B', match='rm', ds=ds, density=True, test=True)
Example #59
def test_lm():
    ds = datasets.get_uts()
    model = ds.eval("A*B*Y")
    coeffs = ds['uts'].ols(model)

    lm = LM('uts', 'A*B*Y', ds, 'effect')
    eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>")
    for i, effect in enumerate(model.effects):
        assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i])
Example #60
def test_plot_array():
    "Test plot.TopoArray"
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray("utsnd", ds=ds, show=False)
    p.close()
    p = plot.TopoArray("utsnd", ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    p = plot.TopoArray("utsnd", "A%B", ds=ds, axw=4, show=False)
    p.close()