コード例 #1
0
ファイル: test_ndvar.py プロジェクト: mhellb/Eelbrain
def test_concatenate():
    """Test concatenate()

    Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
    """
    ds = datasets.get_uts(True)

    # concatenate two cases along the default dimension; the result covers
    # consecutive time windows in argument order
    tail = ds[0, 'utsnd']
    head = ds[1, 'utsnd']
    joined = concatenate((head, tail))
    assert_array_equal(joined.sub(time=(0, 1)).x, head.x)
    assert_array_equal(joined.sub(time=(1, 2)).x, tail.x)
    assert_array_equal(joined.info, ds['utsnd'].info)

    # scalar dimension (frequency)
    psd = psd_welch(ds['utsnd'], n_fft=100)
    low = psd.sub(frequency=(None, 5))
    high = psd.sub(frequency=(45, None))
    joined = concatenate((low, high), 'frequency')
    assert_array_equal(joined.frequency.values[:5], psd.frequency.values[:5])
    assert_array_equal(joined.frequency.values[5:], psd.frequency.values[45:])
    joined_data = joined.get_data(high.dimnames)
    assert_array_equal(joined_data[:, :, 5:], high.x)

    # categorial dimension: splitting and re-concatenating round-trips
    x = get_ndvar(2, frequency=0, cat=4)
    x_re = concatenate([x.sub(cat=(None, 'c')), x.sub(cat=('c', None))], 'cat')
    assert_dataobj_equal(x_re, x)
コード例 #2
0
def test_uts():
    "test plot.UTS plotting function"
    ds = datasets.get_uts()
    x_long = set_tmin(concatenate(ds[:10, 'uts']), -1)

    fig = plot.UTS('uts', ds=ds)
    fig.close()

    # y-limits can be set (single value or pair) and queried
    fig = plot.UTS('uts', 'A%B', ds=ds)
    fig.set_ylim(1)
    fig.set_ylim(0, 1)
    assert fig.get_ylim() == (0, 1)
    fig.set_ylim(1, -1)
    assert fig.get_ylim() == (1, -1)
    fig.close()

    # x-limits: initial window from xlim, then explicit set_xlim
    fig = plot.UTS(x_long, h=2, w=5, xlim=2)
    assert fig.get_xlim() == (-1, 1)
    fig.set_xlim(2, 4)
    assert fig.get_xlim() == (2, 4)
    fig.close()

    # multiple y with xax
    y1 = ds.eval("uts[(A == 'a1') & (B == 'b1')]")
    y1.name = 'y'
    y2 = ds.eval("uts[(A == 'a0') & (B == 'b1')]")
    y2.name = 'y2'
    rm = ds.eval("rm[(A == 'a0') & (B == 'b1')]")
    for y in (y1, [y1, y2]):
        fig = plot.UTS(y, rm)
        fig.close()
コード例 #3
0
def test_source_ndvar():
    "Test NDVar with source dimension"
    ds = datasets.get_mne_sample(-0.1, 0.1, src='ico', sub='index<=1')
    original = ds['src', 0]
    assert original.source.parc.name == 'aparc'
    # switch to the 2009 atlas and back; the round-trip should restore the
    # original parcellation without copying the data
    relabeled = set_parc(original, 'aparc.a2009s')
    assert relabeled.source.parc.name == 'aparc.a2009s'
    conn = relabeled.source.connectivity()
    assert np.sum(original.source.parc == relabeled.source.parc) < len(original.source)
    restored = set_parc(relabeled, 'aparc')
    assert restored.source.parc.name == 'aparc'
    assert_array_equal(original.source.parc, restored.source.parc)
    assert original.x is restored.x
    assert_array_equal(restored.source.connectivity(), conn)

    # labels_from_cluster
    case0, case1 = ds['src']
    case0 = case0 * (case0 > 15)
    labels_a = labels_from_clusters(case0)
    assert len(labels_a) == 1
    labels_a_sum = labels_from_clusters(case0.sum('time'))
    assert len(labels_a_sum) == 1
    assert_label_equal(labels_a_sum[0], labels_a[0])
    case1 = case1 * (case1 > 2)
    labels_b = labels_from_clusters(concatenate((case0, case1), 'case'))
    assert len(labels_b) == 2
    assert_label_equal(labels_a[0], labels_b[0])
コード例 #4
0
def test_source_ndvar():
    "Test NDVar with source dimension"
    ds = datasets.get_mne_sample(-0.1, 0.1, src='ico', sub='index<=1')
    v = ds['src', 0]
    assert v.source.parc.name == 'aparc'
    # re-parcellate to the 2009 atlas: labels change but data are shared
    v_new = set_parc(v, 'aparc.a2009s')
    assert v_new.source.parc.name == 'aparc.a2009s'
    connectivity = v_new.source.connectivity()
    assert np.sum(v.source.parc == v_new.source.parc) < len(v.source)
    # re-parcellating back restores the original labels and connectivity
    v_restored = set_parc(v_new, 'aparc')
    assert v_restored.source.parc.name == 'aparc'
    assert_array_equal(v.source.parc, v_restored.source.parc)
    assert v.x is v_restored.x
    assert_array_equal(v_restored.source.connectivity(), connectivity)

    # labels_from_cluster
    x1, x2 = ds['src']
    x1 = x1 * (x1 > 15)
    single = labels_from_clusters(x1)
    assert len(single) == 1
    single_from_sum = labels_from_clusters(x1.sum('time'))
    assert len(single_from_sum) == 1
    assert_label_equal(single_from_sum[0], single[0])
    x2 = x2 * (x2 > 2)
    double = labels_from_clusters(concatenate((x1, x2), 'case'))
    assert len(double) == 2
    assert_label_equal(single[0], double[0])
コード例 #5
0
def test_ncrf():
    """Fit NCRF with one and two stimuli, and with cross-validation."""
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')

    # 1 stimulus
    model = fit_ncrf(
        meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444,
        n_iter=3, n_iterc=3, n_iterf=10)
    # check residual
    assert model.residual == pytest.approx(172.714, 0.001)
    # check scaling
    baseline = stim.mean()
    assert model._stim_baseline[0] == baseline
    assert model._stim_scaling[0] == (stim - baseline).abs().mean()
    assert model.h.norm('time').norm('source').norm('space') == pytest.approx(6.043e-10, rel=0.001)

    # test persistence: a pickle round-trip must preserve the fitted model
    reloaded = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(reloaded.h, model.h)
    assert_dataobj_equal(reloaded.h_scaled, model.h_scaled)
    assert reloaded.residual == model.residual

    # 2 stimuli, one of them 2-d, normalize='l2'
    diff = stim.diff('time')
    stim2 = concatenate([diff.clip(0), diff.clip(max=0)], Categorial('rep', ['on', 'off']))
    model = fit_ncrf(
        meg, [stim, stim2], fwd, emptyroom, tstop=0.2, normalize='l2',
        mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10)
    # check scaling
    assert model._stim_baseline[0] == stim.mean()
    assert model._stim_scaling[0] == stim.std()
    assert model.h[0].norm('time').norm('source').norm('space') == pytest.approx(4.732e-10, 0.001)

    # cross-validation
    model = fit_ncrf(
        meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu='auto',
        n_iter=1, n_iterc=2, n_iterf=2, n_workers=1)
    assert model.mu == pytest.approx(0.0203, 0.001)
    model.cv_info()
コード例 #6
0
def test_concatenate():
    "Test concatenate()"
    ds = datasets.get_uts(True)

    # join the two cases in reverse order along time
    tail = ds[0, 'utsnd']
    head = ds[1, 'utsnd']
    out = concatenate((head, tail))
    # first second of the result is the first argument, next second the other
    assert_array_equal(out.sub(time=(0, 1)).x, head.x)
    assert_array_equal(out.sub(time=(1, 2)).x, tail.x)
コード例 #7
0
def test_concatenate():
    "Test concatenate()"
    ds = datasets.get_uts(True)

    # two single-case NDVars, concatenated with case 1 first
    case_a = ds[0, 'utsnd']
    case_b = ds[1, 'utsnd']
    combined = concatenate((case_b, case_a))
    # the result's time axis contains the arguments back to back
    assert_array_equal(combined.sub(time=(0, 1)).x, case_b.x)
    assert_array_equal(combined.sub(time=(1, 2)).x, case_a.x)
コード例 #8
0
    def report(
            self,
            brain_view: Union[str, Sequence[float]] = None,
            axw: float = None,
            surf: str = 'inflated',
            cortex: Any = ((1.00,) * 3, (.4,) * 3),
    ):
        """Generate a report with brain plots and a pairwise-test table.

        Parameters
        ----------
        brain_view
            View specification forwarded to :class:`BrainLayout`
            (``None`` for the default view).
        axw
            Axes width, forwarded to :class:`BrainLayout`.
        cortex
            Cortex coloring (default is a light/dark gray pair).

        Returns
        -------
        A ``fmtxt.FloatingLayout`` containing the brain figure followed by a
        table of pairwise localization tests.
        """
        doc = []

        # plot model-test results
        layout = BrainLayout(brain_view, axw)
        sp = plot.brain.SequencePlotter()
        sp.set_brain_args(mask=(0, 0, 0, 1))
        if layout.brain_view:
            sp.set_parallel_view(*layout.brain_view)
        sp.set_brain_args(surf=surf, cortex=cortex)
        # ROI overlay
        if self.masks:
            # NOTE(review): only masks[0] and masks[1] are combined — assumes
            # exactly two masks (one per hemisphere?); confirm upstream.
            roi = self.masks[0] + self.masks[1]
            sp.add_ndvar_label(roi, color=(0, 1, 0), borders=2, overlay=True)
        # det data: one normalized difference map per model term
        cmap = plot.soft_threshold_colormap('polar-lux-a', .2, 1)
        for label, term in self.terms.items():
            res = self.ress[term]
            # normalize each hemisphere separately so both peak at 1
            diffs = [res.difference.sub(source=hemi) for hemi in ['lh', 'rh']]
            diffs = [diff / diff.max() for diff in diffs]
            diff = concatenate(diffs, 'source')
            sp.add_ndvar(diff, cmap=cmap, vmax=1, label=label)
        p = sp.plot_table(view='lateral', orientation='vertical', **layout.table_args)
        doc.append(p)

        # generate table: rows are terms, columns are term-by-hemisphere
        t = fmtxt.Table('l' * (2 * len(self.terms) + 1))
        # header 1: hemisphere labels spanning the term columns
        t.cell('')
        for text in ['Left', 'Right']:
            t.cell(f'{text} H', width=len(self.terms))
        # header 2: term labels repeated for each hemisphere
        t.cell('')
        for _ in range(2):
            t.cells(*self.terms.keys())
        t.midrule()
        for label, t1 in self.terms.items():
            t.cell(label)
            for hemi in ['lh', 'rh']:
                for t2 in self.terms.values():
                    if t1 == t2:
                        # no test of a term against itself
                        t.cell('')
                        continue
                    res = self.loc_ress[t1, t2, hemi].f_tests[0]
                    stars = fmtxt.Stars.from_p(res.p)
                    t.cell([stars, res._asfmtext()])
        doc.append(t)
        return fmtxt.FloatingLayout(doc)
コード例 #9
0
ファイル: test_ndvar.py プロジェクト: rbaehr/Eelbrain
def test_concatenate():
    """Test concatenate()

    Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
    """
    ds = datasets.get_uts(True)

    # concatenate two cases; the result spans consecutive time windows
    second_segment = ds[0, 'utsnd']
    first_segment = ds[1, 'utsnd']
    result = concatenate((first_segment, second_segment))
    assert_array_equal(result.sub(time=(0, 1)).x, first_segment.x)
    assert_array_equal(result.sub(time=(1, 2)).x, second_segment.x)
    # info dict is carried over from the inputs
    assert_array_equal(result.info, ds['utsnd'].info)
コード例 #10
0
def test_concatenate():
    """Test concatenate()

    Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
    """
    ds = datasets.get_uts(True)

    # case concatenation along time (default)
    later = ds[0, 'utsnd']
    earlier = ds[1, 'utsnd']
    merged = concatenate((earlier, later))
    assert_array_equal(merged.sub(time=(0, 1)).x, earlier.x)
    assert_array_equal(merged.sub(time=(1, 2)).x, later.x)
    assert_array_equal(merged.info, ds['utsnd'].info)

    # scalar dimension (frequency): join low and high bands
    psd = psd_welch(ds['utsnd'], n_fft=100)
    band_low = psd.sub(frequency=(None, 5))
    band_high = psd.sub(frequency=(45, None))
    merged = concatenate((band_low, band_high), 'frequency')
    assert_array_equal(merged.frequency.values[:5], psd.frequency.values[:5])
    assert_array_equal(merged.frequency.values[5:], psd.frequency.values[45:])
    merged_data = merged.get_data(band_high.dimnames)
    assert_array_equal(merged_data[:, :, 5:], band_high.x)
コード例 #11
0
def test_frequency_response():
    """frequency_response() should agree with scipy.signal.freqz."""
    kernel = signal.firwin(80, 0.5, window=('kaiser', 8))
    expected_freqs, expected_fresp = signal.freqz(kernel)
    hz_to_rad = 2 * np.pi * 0.01

    b = NDVar(kernel, (UTS(0, 0.01, 80),))
    fresp = frequency_response(b)
    assert_array_equal(fresp.x, expected_fresp)
    assert_array_equal(fresp.frequency.values * hz_to_rad, expected_freqs)

    # with a case dimension, the response is computed per case
    stacked = concatenate((b, b), Case)
    fresp = frequency_response(stacked)
    for case_response in fresp.x:
        assert_array_equal(case_response, expected_fresp)
    assert_array_equal(fresp.frequency.values * hz_to_rad, expected_freqs)
コード例 #12
0
def test_frequency_response():
    """Compare frequency_response() against scipy.signal.freqz."""
    taps = signal.firwin(80, 0.5, window=('kaiser', 8))
    ref_freqs, ref_fresp = signal.freqz(taps)
    hz_to_rad = 2 * np.pi * 0.01

    filt = NDVar(taps, (UTS(0, 0.01, 80), ))
    resp = frequency_response(filt)
    assert_array_equal(resp.x, ref_fresp)
    assert_array_equal(resp.frequency.values * hz_to_rad, ref_freqs)

    # 2-d input (case dimension): each case gets the same response
    filt_2d = concatenate((filt, filt), Case)
    resp = frequency_response(filt_2d)
    assert_array_equal(resp.x[0], ref_fresp)
    assert_array_equal(resp.x[1], ref_fresp)
    assert_array_equal(resp.frequency.values * hz_to_rad, ref_freqs)
コード例 #13
0
def test_uts():
    "test plot.UTS plotting function"
    ds = datasets.get_uts()
    x_long = set_tmin(concatenate(ds[:10, 'uts']), -1)

    fig = plot.UTS('uts', ds=ds, show=False)
    fig.close()

    # y-limit handling: single value, pair, and inverted axis
    fig = plot.UTS('uts', 'A%B', ds=ds, show=False)
    fig.set_ylim(1)
    fig.set_ylim(0, 1)
    eq_(fig.get_ylim(), (0, 1))
    fig.set_ylim(1, -1)
    eq_(fig.get_ylim(), (1, -1))
    fig.close()

    # x-limit handling: initial window from xlim, then explicit set_xlim
    fig = plot.UTS(x_long, h=2, w=5, xlim=2)
    eq_(fig.get_xlim(), (-1, 1))
    fig.set_xlim(2, 4)
    eq_(fig.get_xlim(), (2, 4))
    fig.close()
コード例 #14
0
ファイル: test_uts.py プロジェクト: theo-dutcher/Eelbrain
def test_uts():
    "test plot.UTS plotting function"
    ds = datasets.get_uts()
    x_long = set_tmin(concatenate(ds[:10, 'uts']), -1)

    fig = plot.UTS('uts', ds=ds)
    fig.close()

    # y-limits: single value, pair, and inverted axis
    fig = plot.UTS('uts', 'A%B', ds=ds)
    fig.set_ylim(1)
    fig.set_ylim(0, 1)
    assert fig.get_ylim() == (0, 1)
    fig.set_ylim(1, -1)
    assert fig.get_ylim() == (1, -1)
    fig.close()

    # x-limits: initial window from xlim, then explicit set_xlim
    fig = plot.UTS(x_long, h=2, w=5, xlim=2)
    assert fig.get_xlim() == (-1, 1)
    fig.set_xlim(2, 4)
    assert fig.get_xlim() == (2, 4)
    fig.close()

    # color dict
    colors = plot.colors_for_oneway(['a0', 'a1', 'a2'])
    a0, a1, a2 = ds[:3, 'uts']
    a0.name, a1.name, a2.name = 'a0', 'a1', 'a2'
    fig = plot.UTS([[a0, a1, a2]], colors=colors)
    fig.close()

    # multiple y with xax
    y1 = ds.eval("uts[(A == 'a1') & (B == 'b1')]")
    y1.name = 'y'
    y2 = ds.eval("uts[(A == 'a0') & (B == 'b1')]")
    y2.name = 'y2'
    rm = ds.eval("rm[(A == 'a0') & (B == 'b1')]")
    for y in (y1, [y1, y2]):
        fig = plot.UTS(y, rm)
        fig.close()
コード例 #15
0
def test_source_estimate():
    """Test SourceSpace dimension

    Covers auto-conversion to NDVar, source-space clustering, parc-based
    connectivity, parc-restricted cluster tests, indexing, and concatenation.
    """
    mne.set_log_level('warning')
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')

    # test auto-conversion
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])

    # source space clustering
    res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10)
    assert res.clusters.n_cases == 52

    # test disconnecting parc
    src = ds['src']
    source = src.source
    parc = source.parc
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    # disconnecting removes connections, so the set must shrink
    assert len(disc_conn) < len(orig_conn)
    # surviving connections stay within a parc label; removed ones crossed labels
    for pair in orig_conn:
        s, d = pair
        if pair in disc_conn:
            assert parc[s] == parc[d]
        else:
            assert parc[s] != parc[d]

    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10,
                           parc='source')
    assert res._cdist.dist.shape[1] == len(srcl.source.parc.cells)
    label = 'superiortemporal-lh'
    c_all = res.find_clusters(maps=True)
    c_label = res.find_clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    # every label-restricted cluster must match the corresponding whole-space
    # cluster: same stats, cluster map restricted to the label
    for case in c_label.itercases():
        id_ = case['id']
        idx = c_all['id'].index(id_)[0]
        assert case['v'] == c_all[idx, 'v']
        assert case['tstart'] == c_all[idx, 'tstart']
        assert case['tstop'] == c_all[idx, 'tstop']
        # the label-restricted p cannot exceed the unrestricted p
        assert case['p'] <= c_all[idx, 'p']
        assert_dataobj_equal(case['cluster'], c_all[idx, 'cluster'].sub(source=label))

    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05, parc='source')
    cl = res.find_clusters(0.05)
    assert cl.eval("p.min()") == res.p.min()
    mp = res.masked_parameter_map()
    assert mp.min() == res.t.min()
    assert mp.max() == res.t.max(res.p <= 0.05)
    assert mp.max() == pytest.approx(-4.95817732)

    # indexing source space: label name and index must select the same data
    s_sub = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    s_idx = src[idx]
    assert_dataobj_equal(s_sub, s_idx)

    # concatenate: rejoining the hemispheres reconstructs the original
    src_reconc = concatenate((src.sub(source='lh'), src.sub(source='rh')), 'source')
    assert_dataobj_equal(src_reconc, src)
コード例 #16
0
def test_source_estimate():
    "Test SourceSpace dimension"
    mne.set_log_level('warning')
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')

    # auto-conversion to NDVar
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])

    # source space clustering
    res = testnd.ttest_ind(
        'src', 'side', ds=ds, samples=0, pmin=0.05, tstart=0.05,
        mintime=0.02, minsource=10)
    assert res.clusters.n_cases == 52

    # disconnecting parc: removed connections are exactly the label-crossing ones
    src = ds['src']
    source = src.source
    parc = source.parc
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    assert len(disc_conn) < len(orig_conn)
    for s, d in orig_conn:
        if (s, d) in disc_conn:
            assert parc[s] == parc[d]
        else:
            assert parc[s] != parc[d]

    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(
        srcl, 'side', ds=ds, samples=10, pmin=0.05, tstart=0.05,
        mintime=0.02, minsource=10, parc='source')
    assert res._cdist.dist.shape[1] == len(srcl.source.parc.cells)
    label = 'superiortemporal-lh'
    c_all = res.find_clusters(maps=True)
    c_label = res.find_clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    # label-restricted clusters mirror the corresponding whole-space clusters
    for case in c_label.itercases():
        i = c_all['id'].index(case['id'])[0]
        assert case['v'] == c_all[i, 'v']
        assert case['tstart'] == c_all[i, 'tstart']
        assert case['tstop'] == c_all[i, 'tstop']
        assert case['p'] <= c_all[i, 'p']
        assert_dataobj_equal(case['cluster'], c_all[i, 'cluster'].sub(source=label))

    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05, parc='source')
    cl = res.find_clusters(0.05)
    assert cl.eval("p.min()") == res.p.min()
    mp = res.masked_parameter_map()
    assert mp.min() == res.t.min()
    assert mp.max() == res.t.max(res.p <= 0.05)
    assert mp.max() == pytest.approx(-4.95817732)

    # indexing source space: by label name and by index must agree
    by_name = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    by_index = src[idx]
    assert_dataobj_equal(by_name, by_index)

    # concatenating the hemispheres reconstructs the original
    reconcatenated = concatenate(
        (src.sub(source='lh'), src.sub(source='rh')), 'source')
    assert_dataobj_equal(reconcatenated, src)
コード例 #17
0
def test_ncrf():
    """Fit NCRF models; check fit metrics, scaling, persistence, and CV."""
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')

    # 1 stimulus
    model = fit_ncrf(
        meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444,
        n_iter=3, n_iterc=3, n_iterf=10, do_post_normalization=False)
    # check residual and explained var
    assert model.explained_var == pytest.approx(0.00641890144769941, rel=0.001)
    assert model.voxelwise_explained_variance.sum() == pytest.approx(0.08261162457414245, rel=0.001)
    assert model.residual == pytest.approx(178.512, 0.001)
    # check scaling
    baseline = stim.mean()
    assert model._stim_baseline[0] == baseline
    assert model._stim_scaling[0] == (stim - baseline).abs().mean()
    assert model.h.norm('time').norm('source').norm('space') == pytest.approx(6.601677e-10, rel=0.001)

    # test persistence: pickle round-trip must preserve the fitted model
    reloaded = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(reloaded.h, model.h)
    assert_dataobj_equal(reloaded.h_scaled, model.h_scaled)
    assert reloaded.residual == model.residual
    assert reloaded.gaussian_fwhm == model.gaussian_fwhm

    # test gaussian fwhm
    model = fit_ncrf(
        meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu=0.0019444,
        n_iter=1, n_iterc=1, n_iterf=1, gaussian_fwhm=50.0)
    assert model.gaussian_fwhm == 50.0

    # 2 stimuli, one of them 2-d, normalize='l2'
    diff = stim.diff('time')
    stim2 = concatenate([diff.clip(0), diff.clip(max=0)], Categorial('rep', ['on', 'off']))
    model = fit_ncrf(
        meg, [stim, stim2], fwd, emptyroom, tstop=[0.2, 0.2], normalize='l2',
        mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10,
        do_post_normalization=False)
    # check scaling
    assert model._stim_baseline[0] == stim.mean()
    assert model._stim_scaling[0] == stim.std()
    assert model.h[0].norm('time').norm('source').norm('space') == pytest.approx(7.0088e-10, rel=0.001)

    # cross-validation
    model = fit_ncrf(
        meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1', mu='auto',
        n_iter=1, n_iterc=2, n_iterf=2, n_workers=1,
        do_post_normalization=False)
    assert model.mu == pytest.approx(0.0203, 0.001)
    model.cv_info()