Code Example #1
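These excerpts omit their import lines. A plausible preamble, assuming `lgp` refers to the lsqfitgp package and `util` is a local test helper, would be:

import numpy as np
import gvar                            # Gaussian random variables / BufferDict
import lsqfitgp as lgp                 # assumed package behind the `lgp` name
from jax import numpy as jnp
from matplotlib import pyplot as plt   # used only in the plotting excerpt
# `util` (used in Code Example #1) is assumed to be a local test helper module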
def test_flat():
    hp = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    # hyperparameters passed as a BufferDict (the 'log(sdev)' key also exposes 'sdev')
    def gpfactory1(hp):
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    # same model, hyperparameters passed as a flat array (hp.buf)
    def gpfactory2(hp):
        gp = lgp.GP(lgp.ExpQuad() * jnp.exp(hp[0])**2)
        gp.addx(x, 'x')
        return gp

    # same model, hyperparameter passed as a bare scalar
    def gpfactory3(hp):
        gp = lgp.GP(lgp.ExpQuad() * jnp.exp(hp)**2)
        gp.addx(x, 'x')
        return gp

    # sample true hyperparameters and generate synthetic data from the model
    truehp = gvar.sample(hp)
    truegp = gpfactory1(truehp)
    trueprior = truegp.prior()
    data = gvar.sample(trueprior)

    # the three hyperprior formats must give the same fitted hyperparameters
    fit1 = lgp.empbayes_fit(hp, gpfactory1, data)
    fit2 = lgp.empbayes_fit(hp.buf, gpfactory2, data)
    fit3 = lgp.empbayes_fit(hp.buf[0], gpfactory3, data)
    util.assert_similar_gvars(fit1.p.buf[0], fit2.p[0], fit3.p)
Code Example #2
def test_data():

    hp = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    def gpfactory(hp):
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    truehp = gvar.sample(hp)
    truegp = gpfactory(truehp)
    trueprior = truegp.prior()

    # build a BufferDict with the same layout as bd, filled with the constant error err
    def makeerr(bd, err):
        return gvar.BufferDict(bd, buf=np.full_like(bd.buf, err))

    data_noerr = gvar.sample(trueprior)
    error = makeerr(data_noerr, 0.1)
    zeroerror = makeerr(data_noerr, 0)
    zerocov = gvar.evalcov(gvar.gvar(data_noerr, zeroerror))
    data_err = gvar.make_fake_data(gvar.gvar(data_noerr, error))

    # each inner list collects equivalent ways of passing the same data to
    # empbayes_fit: gvars, a (mean,) or (mean, cov) tuple, or a callable of
    # the hyperparameters; all of them must give the same fit result
    datas = [
        [
            data_noerr,
            gvar.gvar(data_noerr),
            (data_noerr, ),
            (data_noerr, zerocov),
            lambda _: data_noerr,
            lambda _: gvar.gvar(data_noerr),
            lambda _: (data_noerr, ),
            lambda _: (data_noerr, zerocov),
        ],
        [
            data_err,
            (data_err, ),
            (gvar.mean(data_err), gvar.evalcov(data_err)),
            lambda _: data_err,
            lambda _: (data_err, ),
            lambda _: (gvar.mean(data_err), gvar.evalcov(data_err)),
        ],
    ]

    for datasets in datas:
        fits = []
        for data in datasets:
            fit = lgp.empbayes_fit(hp, gpfactory, data)
            fits.append(fit)

        p = fits[0].minresult.x
        for fit in fits[1:]:
            np.testing.assert_allclose(fit.minresult.x, p, atol=1e-6)
Code Example #3
def check_fit(hyperprior, gpfactory, dataerr=None, alpha=1e-5):
    """do a fit with empbayes_fit and check the fitted hyperparameters
    are compatible with the ones used to generate the data"""

    # generate hyperparameters
    truehp = gvar.sample(hyperprior)

    # generate data
    gp = gpfactory(truehp)
    data = gvar.sample(gp.prior())
    if dataerr:
        # add Gaussian noise of size dataerr and attach the corresponding error
        mean = dataerr * np.random.randn(len(data.buf))
        sdev = np.full_like(mean, dataerr)
        data += gvar.BufferDict(data, buf=gvar.gvar(mean, sdev))

    # run fit
    fit = lgp.empbayes_fit(hyperprior, gpfactory, data, raises=False)

    # check fit result against hyperparameters
    chisq_test(fit.p - truehp, alpha)
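
A minimal usage sketch of check_fit, reusing the single-hyperparameter model from Code Example #1; it assumes chisq_test, called inside check_fit, is defined elsewhere in the same file:

def example_check_fit():
    # single hyperparameter: the kernel output scale, stored as its log
    hyperprior = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    def gpfactory(hp):
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    check_fit(hyperprior, gpfactory)               # exact data
    check_fit(hyperprior, gpfactory, dataerr=0.1)  # data with a 0.1 Gaussian error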
Code Example #4
def test_method():

    hp = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    def gpfactory(hp):
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    truehp = gvar.sample(hp)
    truegp = gpfactory(truehp)
    trueprior = truegp.prior()
    data_fixed = gvar.sample(trueprior)

    # data passed as a callable of the hyperparameters (hyperparameter-dependent data)
    def data_variable(hp):
        return {k: v + hp['log(sdev)'] for k, v in data_fixed.items()}

    for data in [data_fixed, data_variable]:
        fits = []
        # all minimization methods must converge to the same hyperparameters
        kws = [
            dict(method='nograd', minkw=dict(options=dict(xatol=1e-6))),
            dict(method='gradient'),
            dict(method='hessian'),
            dict(method='fisher'),
            dict(method='hessmod'),
        ]
        for kw in kws:
            kwargs = dict(data=data)
            kwargs.update(kw)
            kwargs.setdefault('minkw', {}).update(x0=truehp.buf)
            fit = lgp.empbayes_fit(hp, gpfactory, **kwargs)
            fits.append(fit)
        p = fits[0].minresult.x
        for fit in fits[1:]:
            np.testing.assert_allclose(fit.minresult.x, p, atol=1e-5)
Code Example #5
# excerpt: u, mean, sdev, xpred, xdata and the key DUO are defined earlier in the script
cov = gvar.evalcov(u)  # covariance of u, indexed by pairs of keys

fig, axs = plt.subplots(2, 1, num='dft', clear=True, figsize=[6.4, 7])

ax = axs[0]
ax.set_title('Function')
m = mean[DUO]
s = sdev[DUO]
patch = ax.fill_between(xpred, m - s, m + s, alpha=0.5)
color = patch.get_facecolor()[0]
simulated_lines = np.random.multivariate_normal(m, cov[DUO, DUO])
ax.plot(xpred, simulated_lines, '-', color=color)
ax.plot(xdata, np.zeros_like(xdata), '.k', label='discrete lattice')

ax = axs[1]
ax.set_title('DFT')
simul = gvar.sample(u['dft'])
for i, label in enumerate(['real', 'imag']):
    m = mean['dft'][i]
    s = sdev['dft'][i]
    n = len(m)
    patch = ax.fill_between(np.arange(n), m - s, m + s, alpha=0.5, label=label)
    color = patch.get_facecolor()[0]
    ax.plot(np.arange(n), simul[i], color=color)

for ax in axs:
    ax.legend()
    ax.grid(linestyle='--')

fig.show()
Code Example #6
    # excerpt: tail of the model function (presumably fcn) defined earlier in the script
    return dict(data=data, data2=data2)


def makeprior(gp, plot=False):
    out = ['datagrid']
    if plot:
        out += ['plotgrid']
    prior = gp.predfromdata(constraints, out)
    prior['Mparams'] = Mparams
    prior['M2params'] = M2params
    return prior


#### FAKE DATA ####

truehp = gvar.sample(hyperprior)
truegp = makegp(truehp)
trueprior = makeprior(truegp, plot=True)
trueparams = gvar.sample(trueprior)
truedata = fcn(trueparams)

dataerr = {
    k: np.full_like(v, 0.1 * (np.max(v) - np.min(v)))
    for k, v in truedata.items()
}
data = gvar.make_fake_data(gvar.gvar(truedata, dataerr))


def check_constraints(y):
    # integrate approximately with the trapezoid rule (see the standalone check after this excerpt)
    integ = np.sum((y[:, 1:] + y[:, :-1]) / 2 * np.diff(plotgrid), 1)
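
For reference, a quick standalone check, with made-up shapes, that the trapezoid sum used in check_constraints matches NumPy's built-in trapezoid integration:

import numpy as np

plotgrid = np.linspace(0, 1, 11)        # hypothetical grid
y = np.random.randn(3, plotgrid.size)   # hypothetical integrands, one per row

manual = np.sum((y[:, 1:] + y[:, :-1]) / 2 * np.diff(plotgrid), 1)
reference = np.trapz(y, plotgrid, axis=1)   # np.trapezoid in NumPy >= 2.0
np.testing.assert_allclose(manual, reference)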
Code Example #7
    # excerpt: inside the model function; M, M2, Mparams, M2params, xdata and nx2 are defined earlier
    data = np.tensordot(M(Mparams), xdata, 2)
    
    # data2 = np.einsum('dfxy,fx,fy->d', M2, xdata, xdata)
    # np.einsum does not work with gvar arrays, so build the outer product
    # explicitly and contract it with np.tensordot (checked on plain arrays
    # after this excerpt)
    xdata2 = xdata[:, None, :nx2] * xdata[:, :nx2, None]
    data2 = np.tensordot(M2(M2params), xdata2, 3)
    
    return dict(data=data, data2=data2)

prior = gp.predfromdata(constraints, ['xdata'])
prior['Mparams'] = Mparams
prior['M2params'] = M2params

#### FAKE DATA ####

trueparams = gvar.sample(prior)
truedata = gvar.BufferDict(fcn(trueparams))

dataerr = np.full_like(truedata.buf, 0.1)
datamean = truedata.buf + dataerr * np.random.randn(*dataerr.shape)
data = gvar.BufferDict(truedata, buf=gvar.gvar(datamean, dataerr))

# check sum rules approximately with trapezoid rule
def check_integrals(x, y):
    checksum = np.sum(((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))
    print('sum_i int dx x f_i(x) =', checksum)
    for q in 'ducs':
        idx = indices[q]
        qx = x[idx]
        qy = y[idx]
        checksum = np.sum(qdiff * (qy[:, 1:] + qy[:, :-1]) / 2 * np.diff(qx, axis=1))
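
The np.einsum workaround near the top of this excerpt, an explicit outer product contracted with np.tensordot, can be verified on plain float arrays; a sketch with made-up sizes:

import numpy as np

nd, nf, nx = 3, 4, 5                     # hypothetical sizes
M2 = np.random.randn(nd, nf, nx, nx)
xdata = np.random.randn(nf, nx)

direct = np.einsum('dfxy,fx,fy->d', M2, xdata, xdata)   # works for floats only

xdata2 = xdata[:, :, None] * xdata[:, None, :]   # xdata2[f, x, y] = xdata[f, x] * xdata[f, y]
workaround = np.tensordot(M2, xdata2, 3)          # contract the trailing 3 axes with the leading 3

np.testing.assert_allclose(direct, workaround)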
Code Example #8
    xdata2 = xdata[:, None, :nx2] * xdata[:, :nx2, None]
    data2 = np.tensordot(M2(M2params), xdata2, 3)

    return dict(data=data, data2=data2)


def makeprior(gp):
    prior = gp.predfromdata(constraints, ['xdata'])
    prior['Mparams'] = Mparams
    prior['M2params'] = M2params
    return prior


#### FAKE DATA ####

truehp = gvar.sample(hyperprior)
truegp = makegp(truehp)
trueparams = gvar.sample(makeprior(truegp))
truedata = fcn(trueparams)

dataerr = {
    k: np.full_like(v, 0.1 * (np.max(v) - np.min(v)))
    for k, v in truedata.items()
}
data = gvar.make_fake_data(gvar.gvar(truedata, dataerr))


# check sum rules approximately with trapezoid rule
def check_integrals(x, y):
    checksum = np.sum(
        ((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))