Exemplo n.º 1
0
 def test_chained_lsqfit_p0(self):
     " MultiFitter.chained_lsqfit(...) "
     # sequential fit
     fitter = MultiFitter(models=self.make_models(ncg=1))
     # starting values near the known best-fit point
     p0 = gv.BufferDict({'a': 0.9991638707908023, 'b': 0.4995927960301173})
     # expected fit.p0: one entry per chained sub-fit ('l', 'c1', 'c2');
     # only 'a' survives into the c1/c2 sub-fits
     p0list = [
         p0,
         gv.BufferDict(a=0.9991638707908023),
         gv.BufferDict(a=0.9991638707908023)
     ]
     # case 1: p0 given as a single dict shared by all sub-fits
     fit1 = fitter.chained_lsqfit(data=self.data, prior=self.prior, p0=p0)
     self.assertTrue(self.agree_ref(fit1.p))
     self.assertEqual(list(fit1.chained_fits.keys()), ['l', 'c1', 'c2'])
     self.assertEqual(fit1.p0, p0list)
     # case 2: p0 given as a list with one entry per sub-fit
     fit1 = fitter.chained_lsqfit(data=self.data,
                                  prior=self.prior,
                                  p0=3 * [p0])
     self.assertTrue(self.agree_ref(fit1.p))
     self.assertEqual(list(fit1.chained_fits.keys()), ['l', 'c1', 'c2'])
     self.assertEqual(fit1.p0, p0list)
     # case 3: p0 given as a filename -- the first fit writes the file,
     # the second reads its saved means back as starting values
     fn = 'test_multifitter.p'
     fit1 = fitter.chained_lsqfit(data=self.data, prior=self.prior, p0=fn)
     fit2 = fitter.chained_lsqfit(data=self.data, prior=self.prior, p0=fn)
     self.assertTrue(self.agree_ref(fit1.p))
     self.assertEqual(list(fit1.chained_fits.keys()), ['l', 'c1', 'c2'])
     self.assertEqual([f.pmean for f in fit1.chained_fits.values()],
                      fit2.p0)
     os.unlink(fn)
Exemplo n.º 2
0
 def __call__(self, y0, interval):
     """ Integrate dictionary-valued y0 from interval[0] to interval[-1].

     If ``interval`` has more than two points, integration proceeds
     piecewise and a list of results (one per point after the first) is
     returned; the ``nbad``/``ngood`` step counters are accumulated over
     the pieces.  Otherwise ``y0`` is flattened into a buffer and handed
     to the (array-based) base-class integrator.
     """
     if len(interval) > 2:
         # piecewise: chain single-interval calls, summing step counters
         ans = []
         xlast = interval[0]
         ylast = y0
         nbad = 0
         ngood = 0
         for x in interval[1:]:
             y = self(ylast, interval=(xlast,x))
             xlast = x
             ylast = y 
             nbad += self.nbad 
             ngood += self.ngood
             ans.append(y)
         self.nbad = nbad 
         self.ngood = ngood 
         return ans
     if not isinstance(y0, gvar.BufferDict):
         y0 = gvar.BufferDict(y0)
     # temporarily wrap the dictionary-valued derivative so the base
     # class sees only flat buffers
     deriv_orig = self.deriv
     def deriv(x, y):
         # rebuild the dict view of y, evaluate, return the flat buffer
         y = gvar.BufferDict(y0, buf=y)
         dydx = gvar.BufferDict(deriv_orig(x, y), keys=y0.keys())
         return dydx.buf
     self.deriv = deriv
     ans = super(DictIntegrator, self).__call__(y0.buf, interval)
     self.deriv = deriv_orig  # restore original derivative
     return gvar.BufferDict(y0, buf=ans)
Exemplo n.º 3
0
    def test_nopdf(self):
        " integrator(f ... nopdf=True) and pdf(p) "
        # correlated 2-d Gaussian test distribution
        xarray = gv.gvar([5., 3.], [[4., 1.9], [1.9, 1.]])
        xdict = gv.BufferDict([(0, 1), (1, 1)])
        xdict = gv.BufferDict(xdict, buf=xarray)  # same gvars, dict layout
        pdf = PDFIntegrator(xarray).pdf

        def farray(x):
            # array-valued integrand: [x0*pdf, x0^2*pdf, pdf]
            if hasattr(x, 'keys'):
                x = x.buf
            prob = pdf(x)
            return [x[0] * prob, x[0]**2 * prob, prob]

        def fdict(x):
            # same integrand returned as a BufferDict
            if hasattr(x, 'keys'):
                x = x.buf
            prob = pdf(x)
            return gv.BufferDict([(0, x[0] * prob), (1, x[0]**2 * prob),
                                  (3, prob)])

        for x in [xarray, xdict]:
            # shift the means slightly so the pdf supplied by the
            # integrand differs from the integrator's own weight
            x[0] -= 0.1 * x[0].sdev
            x[1] += 0.1 * x[1].sdev
            for f in [farray, fdict]:
                integ = PDFIntegrator(x)
                integ(f, neval=1000, nitn=5)  # adapt the grid first
                r = integ(f, neval=1000, nitn=5, nopdf=True, adapt=False)
                # reconstruct mean and sdev of x0 (approx. 5 and 2)
                rmean = r[0]
                rsdev = np.sqrt(r[1] - rmean**2)
                self.assertTrue(abs(rmean.mean - 5.) < 5. * rmean.sdev)
                self.assertTrue(abs(rsdev.mean - 2.) < 5. * rsdev.sdev)
Exemplo n.º 4
0
def main():
    """ Run multi-exponential fits with 1-6 terms, then an error budget. """
    x, y = make_data()  # collect fit data
    p0 = None  # make larger fits go faster (opt.)
    for nexp in range(1, 7):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        print(fit)  # print the fit results
        if nexp > 2:
            E = fit.p['E']  # best-fit parameters
            a = fit.p['a']
            print('E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0])
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean  # starting point for next fit (opt.)
        print()

    # error budget analysis
    # NOTE: E, a and fit below are left over from the final loop pass
    # outputs = {
    #     'E1/E0':E[1]/E[0], 'E2/E0':E[2]/E[0],
    #     'a1/a0':a[1]/a[0], 'a2/a0':a[2]/a[0]
    #     }
    # inputs = {'E':fit.prior['E'], 'a':fit.prior['a'], 'y':y}
    outputs = gv.BufferDict()
    outputs['E2/E0'] = E[2] / E[0]
    outputs['E1/E0'] = E[1] / E[0]
    outputs['a2/a0'] = a[2] / a[0]
    outputs['a1/a0'] = a[1] / a[0]
    inputs = gv.BufferDict()
    inputs['a'] = fit.prior['a']
    inputs['y'] = y
    inputs['E'] = fit.prior['E']
    print('================= Error Budget Analysis')
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
Exemplo n.º 5
0
    def test_expval(self):
        " integrator(f ...) "
        # 2-d Gaussian with a very wide first component (sdev = 20)
        xarray = gv.gvar([5., 3.], [[400., 0.9], [0.9, 1.]])
        xdict = gv.BufferDict([(0, 1), (1, 1)])
        xdict = gv.BufferDict(xdict, buf=xarray)  # same gvars, dict layout
        xscalar = xarray[0]

        def fscalar(x):
            # scalar integrand: first component of x
            if hasattr(x, 'keys'):
                x = x.buf
            return x.flat[0]

        def farray(x):
            # array of moments of the first component
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.PDFStatistics.moments(x.flat[0])

        def fdict(x):
            # first four powers of the first component, as a BufferDict
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.BufferDict([(0, x.flat[0]), (1, x.flat[0]**2),
                                  (2, x.flat[0]**3), (3, x.flat[0]**4)])

        for x in [xscalar, xarray, xdict]:
            integ = PDFIntegrator(x)
            integ(neval=1000, nitn=5)  # adapt the grid first
            for f in [fscalar, farray, fdict]:
                r = integ(f, neval=1000, nitn=5, adapt=False)
                if f is fscalar:
                    self.assertTrue(abs(r.mean - 5) < 5. * r.sdev)
                else:
                    if hasattr(r, 'keys'):
                        r = r.buf
                    # Gaussian moments: mean 5, sdev 20, zero skew and
                    # zero excess kurtosis
                    s = gv.PDFStatistics(r)
                    self.assertTrue(abs(s.mean.mean - 5.) < 10. * s.mean.sdev)
                    self.assertTrue(abs(s.sdev.mean - 20.) < 10. * s.sdev.sdev)
                    self.assertTrue(abs(s.skew.mean) < 10. * s.skew.sdev)
                    self.assertTrue(abs(s.ex_kurt.mean) < 10. * s.ex_kurt.sdev)

        # covariance test
        # N.B. Integrand has two entries that are identical,
        #   which leads to a singular covariance -- so SVD
        #   is essential here. The off-diagonal elements
        #   of np.outer(x, x) are what cause the singularity.
        def fcov(x):
            # first and second moments, so covariance can be reconstructed
            return dict(x=x, xx=np.outer(x, x))

        integ = PDFIntegrator(xarray)
        r = integ(fcov, neval=1000, nitn=5)
        rmean = r['x']
        rcov = r['xx'] - np.outer(r['x'], r['x'])
        # compare reconstructed mean/cov against the exact values
        xmean = gv.mean(xarray)
        xcov = gv.evalcov(xarray)
        for i in [0, 1]:
            self.assertTrue(abs(rmean[i].mean - xmean[i]) < 5. * rmean[i].sdev)
            for j in [0, 1]:
                self.assertTrue(
                    abs(rcov[i, j].mean - xcov[i, j]) < 5. * rcov[i, j].sdev)
Exemplo n.º 6
0
    def test_expval(self):
        " integrator(f ...) "
        # correlated 2-d Gaussian: means (5, 3), sdevs (2, 1)
        xarray = gv.gvar([5., 3.], [[4., 0.9], [0.9, 1.]])
        xdict = gv.BufferDict([(0, 1), (1, 1)])
        xdict = gv.BufferDict(xdict, buf=xarray)  # same gvars, dict layout
        xscalar = xarray[0]

        def fscalar(x):
            # scalar integrand: first component of x
            if hasattr(x, 'keys'):
                x = x.buf
            return x.flat[0]

        def farray(x):
            # array of moments of the first component
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.PDFStatistics.moments(x.flat[0])

        def fdict(x):
            # first four powers of the first component, as a BufferDict
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.BufferDict([(0, x.flat[0]), (1, x.flat[0]**2),
                                  (2, x.flat[0]**3), (3, x.flat[0]**4)])

        for x in [xscalar, xarray, xdict]:
            integ = PDFIntegrator(x)
            integ(neval=1000, nitn=5)  # adapt the grid first
            for f in [fscalar, farray, fdict]:
                r = integ(f, neval=1000, nitn=5, adapt=False)
                if f is fscalar:
                    self.assertTrue(abs(r.mean - 5) < 5. * r.sdev)
                else:
                    if hasattr(r, 'keys'):
                        r = r.buf
                    # Gaussian moments: mean 5, sdev 2, zero skew and
                    # zero excess kurtosis
                    s = gv.PDFStatistics(r)
                    self.assertTrue(abs(s.mean.mean - 5.) < 10. * s.mean.sdev)
                    self.assertTrue(abs(s.sdev.mean - 2.) < 10. * s.sdev.sdev)
                    self.assertTrue(abs(s.skew.mean) < 10. * s.skew.sdev)
                    self.assertTrue(abs(s.ex_kurt.mean) < 10. * s.ex_kurt.sdev)

        # covariance test
        def fcov(x):
            # first and second moments, so covariance can be reconstructed
            return dict(x=x, xx=np.outer(x, x))

        integ = PDFIntegrator(xarray)
        r = integ(fcov, neval=1000, nitn=5)
        rmean = r['x']
        rcov = r['xx'] - np.outer(r['x'], r['x'])
        # compare reconstructed mean/cov against the exact values
        xmean = gv.mean(xarray)
        xcov = gv.evalcov(xarray)
        for i in [0, 1]:
            self.assertTrue(abs(rmean[i].mean - xmean[i]) < 5. * rmean[i].sdev)
            for j in [0, 1]:
                self.assertTrue(
                    abs(rcov[i, j].mean - xcov[i, j]) < 5. * rcov[i, j].sdev)
Exemplo n.º 7
0
 def ff(theta, nopdf=nopdf):
     # Batch integrand in the tangent-mapped variable theta:
     # x = scale * tan(theta) maps theta onto the whole real line.
     # (f, self are captured from the enclosing scope.)
     tan_theta = numpy.tan(theta)
     x = self.scale * tan_theta
     jac = self.scale * (tan_theta**2 + 1.)  # dx/dtheta
     if nopdf:
         # weight is the jacobian times per-variable factors from pjac
         pdf = jac * self.pdf.pjac[None, :]
     else:
         # jacobian times the standard-normal density in x
         pdf = jac * numpy.exp(-(x**2) / 2.) / numpy.sqrt(2 * numpy.pi)
     dp = self.pdf.x2dpflat(x)  # flat parameter displacements from the mean
     parg = None
     ans = None
     fparg_is_dict = False
     # iterate through the batch
     for i, (dpi, pdfi) in enumerate(zip(dp, pdf)):
         p = self.pdf.meanflat + dpi
         if parg is None:
             # first time only: build the argument container once
             # (BufferDict for dict-valued g, array otherwise) and
             # reuse it on later iterations
             if self.pdf.shape is None:
                 parg = _gvar.BufferDict(self.pdf.g, buf=p)
             else:
                 parg = p.reshape(self.pdf.shape)
         else:
             # refill the existing container in place
             if parg.shape is None:
                 parg.buf = p
             else:
                 parg.flat[:] = p
         fparg = 1. if f is None else f(parg)
         if ans is None:
             # first time only: allocate output storage shaped like fparg,
             # with a leading batch axis of length len(pdf)
             if hasattr(fparg, 'keys'):
                 fparg_is_dict = True
                 if not isinstance(fparg, _gvar.BufferDict):
                     fparg = _gvar.BufferDict(fparg)
                 ans = _gvar.BufferDict()
                 for k in fparg:
                     ans[k] = numpy.empty(
                         (len(pdf), ) + fparg.slice_shape(k)[1], float)
             else:
                 if numpy.shape(fparg) == ():
                     ans = numpy.empty(len(pdf), float)
                 else:
                     ans = numpy.empty(
                         (len(pdf), ) + numpy.shape(fparg), float)
         # store integrand * weight for this batch member
         if fparg_is_dict:
             prod_pdfi = numpy.prod(pdfi)
             for k in ans:
                 ans[k][i] = fparg[k]
                 ans[k][i] *= prod_pdfi
         else:
             if not isinstance(fparg, numpy.ndarray):
                 fparg = numpy.asarray(fparg)
             ans[i] = fparg * numpy.prod(pdfi)
     return ans
Exemplo n.º 8
0
def main():
    """ Marginalized fits: 100-term prior, only nexp terms fit explicitly. """
    x, y = make_data()
    prior = make_prior(100)  # 100 exponential terms in all
    p0 = None
    for nexp in range(1, 6):
        # marginalize the last 100 - nexp terms (in ymod_prior)
        fit_prior = gv.BufferDict()  # part of prior used in fit
        ymod_prior = gv.BufferDict()  # part of prior absorbed in ymod
        for k in prior:
            fit_prior[k] = prior[k][:nexp]
            ymod_prior[k] = prior[k][nexp:]
        ymod = y - fcn(x, ymod_prior)  # remove terms in ymod_prior

        # fit modified data with just nexp terms (in fit_prior)
        fit = lsqfit.nonlinear_fit(
            data=(x, ymod),
            prior=fit_prior,
            fcn=fcn,
            p0=p0,
            tol=1e-15,
            svdcut=1e-4,
        )

        # print fit information
        print('************************************* nexp =', nexp)
        print(fit.format(True))
        p0 = fit.pmean  # reuse as starting point for the next fit

    # print summary information and error budget (from the last fit)
    E = fit.p['E']  # best-fit parameters
    a = fit.p['a']
    # outputs = {
    #     'E1/E0':E[1] / E[0], 'E2/E0':E[2] / E[0],
    #     'a1/a0':a[1] / a[0], 'a2/a0':a[2] / a[0]
    #     }
    # inputs = {
    #     'E prior':prior['E'], 'a prior':prior['a'],
    #     'svd cut':fit.svdcorrection,
    #     }
    outputs = gv.BufferDict()
    outputs['E2/E0'] = E[2] / E[0]
    outputs['E1/E0'] = E[1] / E[0]
    outputs['a2/a0'] = a[2] / a[0]
    outputs['a1/a0'] = a[1] / a[0]
    inputs = gv.BufferDict()
    inputs['E prior'] = prior['E']
    inputs['svd cut'] = fit.svdcorrection
    inputs['a prior'] = prior['a']
    print(fit.fmt_values(outputs))
    print(fit.fmt_errorbudget(outputs, inputs))
Exemplo n.º 9
0
 def test_buildprior(self):
     " MultiFitter.buildprior "
     # Start from a copy of the reference prior and insert an extra
     # entry that none of the models use.
     extended = gv.BufferDict(self.prior)
     extended['dummy'] = gv.gvar('12(12)')
     # buildprior should strip the unused entry, recovering the original.
     fitter = MultiFitter(models=self.make_models(ncg=1))
     built = fitter.buildprior(prior=extended)
     self.assertEqual(str(built), str(self.prior))
Exemplo n.º 10
0
    def _make_empbayes_fit(self, empbayes_grouping='order'):
        """ Run (and cache) an empirical-Bayes fit for the given grouping. """
        # recompute only when no fit is cached or the grouping changed
        if (self._empbayes_fit is None) or (empbayes_grouping !=
                                            self.empbayes_grouping):
            self.empbayes_grouping = empbayes_grouping
            self._counter = {'iters': 0, 'evals': 0}

            # one optimization variable per group, all starting at 1
            z0 = gv.BufferDict()
            for group in self._empbayes_groupings():
                z0[group] = 1.0

            # Might need to change minargs default values for empbayes_fit to converge:
            # tol=1e-8, svdcut=1e-12, debug=False, maxit=1000, add_svdnoise=False, add_priornoise=False
            # Note: maxit != maxfev. See https://github.com/scipy/scipy/issues/3334
            # For Nelder-Mead algorithm, maxfev < maxit < 3 maxfev?

            # For debugging. Same as 'callback':
            # https://github.com/scipy/scipy/blob/c0dc7fccc53d8a8569cde5d55673fca284bca191/scipy/optimize/optimize.py#L651
            def analyzer(arg):
                # debugging callback; currently unused (analyzer=None below)
                self._counter['evals'] += 1
                print("\nEvals: ", self._counter['evals'], arg, "\n")
                print(type(arg[0]))
                return None

            fit, z = lsqfit.empbayes_fit(z0,
                                         fitargs=self._make_fitargs,
                                         maxit=200,
                                         analyzer=None)
            print(z)
            self._empbayes_fit = fit

        return self._empbayes_fit
Exemplo n.º 11
0
def key_parameters(p):
    """ collect key fit parameters in dictionary """
    selected = gv.BufferDict()
    # ground-state amplitude/energy for each channel
    for label in ('etas:a', 'etas:dE', 'Ds:a', 'Ds:dE'):
        selected[label] = p[label][0]
    # ground-state -> ground-state matrix element
    selected['Vnn'] = p['Vnn'][0, 0]
    return selected
Exemplo n.º 12
0
    def test_chained_lsqfit(self):
        " MultiFitter.chained_lsqfit(models=[m1, m2, m3], ...) "
        # sequential fit
        fitter = MultiFitter(models=self.make_models(ncg=1))
        fit1 = fitter.chained_lsqfit(data=self.data, prior=self.prior)
        self.assertEqual(str(fit1.p), "{'a': 0.99929(48),'b': 0.50004(81)}")
        self.assertEqual(list(fit1.chained_fits.keys()), ['l', 'c1', 'c2'])

        # with coarse grain, marginalization and extend, and with fast=False
        prior = gv.BufferDict([
            ('log(a)', gv.log(self.prior['a'])),
            ('b', self.prior['b']),
        ])
        # nearly identical copy of log(a), to exercise strongly
        # correlated parameters
        prior['log(aa)'] = prior['log(a)'] + gv.gvar('0(1)') * 1e-6
        fitter = MultiFitter(models=self.make_models(ncg=2), fast=False)
        fit2 = fitter.chained_lsqfit(data=self.data,
                                     prior=prior,
                                     mopt=True,
                                     extend=True)
        # extend=True adds exponentiated versions ('a', 'aa') of the
        # log parameters to fit2.p
        self.assertEqual(
            str(fit2.p),
            "{'log(a)': -0.00073(48),'b': 0.50015(82),"
            "'log(aa)': -0.00073(48),'a': 0.99927(48),"
            "'aa': 0.99927(48)}",
        )
Exemplo n.º 13
0
 def fdict(x):
     # Dict-like inputs are reduced to their flat buffer first.
     if hasattr(x, 'keys'):
         x = x.buf
     v = x.flat[0]
     # First four powers of the leading element, keyed 0..3.
     return gv.BufferDict([(k, v ** (k + 1)) for k in range(4)])
Exemplo n.º 14
0
 def test_buildprior_fast(self):
     " MultiFitter.buildprior with fast=False "
     # Copy the reference prior and add an entry no model uses.
     extended = gv.BufferDict(self.prior)
     extended['dummy'] = gv.gvar('12(12)')
     # With fast=False, buildprior keeps every entry of the prior,
     # including the unused one.
     fitter = MultiFitter(models=self.make_models(ncg=1), fast=False)
     built = fitter.buildprior(prior=extended)
     self.assertEqual(str(extended), str(built))
Exemplo n.º 15
0
def build_prior(nexp):
    """ Construct the fit prior with ``nexp`` terms per tower.

    For both the plain ('vec:s') and QED ('vec:qed:s') channels the
    prior holds log-normal amplitudes (a1, ao) and energy differences
    (dE, dEo).  The first entry of each tower gets its own (narrower or
    smaller) prior; the remaining nexp-1 entries share a common width.
    The original hand-unrolled version repeated the same four-line
    pattern eight times; this table-driven form preserves the key order
    and per-entry independence of the gvars exactly.
    """
    prior = gv.BufferDict()
    # (tower name, (mean, sdev) for entries 1..nexp-1, (mean, sdev) for entry 0)
    towers = [
        ('a1', (1, 1), (0.5, 1)),
        ('ao', (1, 1), (0.5, 1)),
        ('dE', (2, 2), (1, 1)),
        ('dEo', (1, 1), (1, 1)),
    ]
    for suffix in ('vec:s', 'vec:qed:s'):
        for name, bulk, first in towers:
            key = 'log({0}:{1})'.format(name, suffix)
            # build a fresh, independent gvar for every entry
            prior.add(key, [log(gv.gvar(*bulk)) for i in range(nexp)])
            prior[key][0] = log(gv.gvar(*first))
    return prior
Exemplo n.º 16
0
    def gv_data(self):
        """ Assemble correlated gvar data per ensemble from bootstrap samples. """
        gv_data = {}

        for ens in sorted(self.bs_data):
            gv_data[ens] = gv.BufferDict()
            # bootstrap samples; entry 0 is the central value, skipped here
            for param in ['mO', 'mpi', 'mk', 'Fpi']:
                gv_data[ens][param] = self.bs_data[ens][param][1:]

            # correlated averages over the bootstrap samples
            gv_data[ens] = gv.dataset.avg_data(gv_data[ens], bstrap=True) 

            # re-center on the stored central values (entry 0) while
            # keeping the bootstrap covariances
            for param in ['mO', 'mpi', 'mk', 'Fpi']: 
                gv_data[ens][param] = gv_data[ens][param] - gv.mean(gv_data[ens][param]) + self.bs_data[ens][param][0]
            
            # scale-setting quantities carried through unchanged
            gv_data[ens]['a/w'] = self.bs_data[ens]['a/w']
            gv_data[ens]['t/a^2'] = self.bs_data[ens]['t/a^2']

            gv_data[ens]['a/w:orig'] = self.bs_data[ens]['a/w:orig']
            gv_data[ens]['a/w:impr'] = self.bs_data[ens]['a/w:impr']
            gv_data[ens]['t/a^2:orig'] = self.bs_data[ens]['t/a^2:orig']
            gv_data[ens]['t/a^2:impr'] = self.bs_data[ens]['t/a^2:impr']

            # tiny (1e-6 relative) errors so these enter fits as gvars
            gv_data[ens]['L'] = gv.gvar(self.bs_data[ens]['L'], self.bs_data[ens]['L'] / 10**6)
            gv_data[ens]['alpha_s'] = gv.gvar(self.bs_data[ens]['alpha_s'], self.bs_data[ens]['alpha_s'] / 10**6)

        return gv_data
Exemplo n.º 17
0
def test_flat():
    """ Hyperparameters as BufferDict, flat buffer, or scalar give the same fit. """
    hp = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    # three equivalent GP factories, differing only in how hp is accessed
    def gpfactory1(hp):
        # dict access: BufferDict exposes 'sdev' for the 'log(sdev)' entry
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    def gpfactory2(hp):
        # flat-buffer access: hp[0] is log(sdev)
        gp = lgp.GP(lgp.ExpQuad() * jnp.exp(hp[0])**2)
        gp.addx(x, 'x')
        return gp

    def gpfactory3(hp):
        # scalar access: hp itself is log(sdev)
        gp = lgp.GP(lgp.ExpQuad() * jnp.exp(hp)**2)
        gp.addx(x, 'x')
        return gp

    # synthetic data drawn from a sampled "true" hyperparameter
    truehp = gvar.sample(hp)
    truegp = gpfactory1(truehp)
    trueprior = truegp.prior()
    data = gvar.sample(trueprior)
    fit1 = lgp.empbayes_fit(hp, gpfactory1, data)
    fit2 = lgp.empbayes_fit(hp.buf, gpfactory2, data)
    fit3 = lgp.empbayes_fit(hp.buf[0], gpfactory3, data)
    # all three parametrizations must land on equivalent posteriors
    util.assert_similar_gvars(fit1.p.buf[0], fit2.p[0], fit3.p)
Exemplo n.º 18
0
 def params(self):
     """
     Read in fit results.

     Loads parameter means (pmean.txt + p0mean.txt) and their joint
     covariance (cov.txt) from ./results/ and returns them as
     correlated gvars in a BufferDict: 'b' holds the first four,
     'b0' the remainder.
     """
     p = gv.BufferDict()
     dir_str = "./results/"
     pmean = np.loadtxt(dir_str + "pmean.txt")
     p0mean = np.loadtxt(dir_str + "p0mean.txt")
     pcorr = np.loadtxt(dir_str + "corr.txt")  # loaded for the (commented-out) check only
     pcov = np.loadtxt(dir_str + "cov.txt")
     # print pmean, p0mean
     pmean = np.append(pmean, p0mean)
     #print pmean
     #print pcov
     #print pcorr
     # correlation matrix rebuilt from cov -- debug cross-check, unused below
     D = np.sqrt(np.diag(np.diag(pcov)))
     DInv = np.linalg.inv(D)
     correlationMat = np.matmul(DInv, np.matmul(pcov,DInv))
     #print correlationMat - pcorr
     x = gv.gvar(pmean, pcov)
     p['b'] = x[:4]
     p['b0'] = x[4:]
     #print gv.evalcov(p)
     #print pcov
     return p
Exemplo n.º 19
0
def flat(g):
    """convert dictionary or array to 1D array"""
    # BufferDict-like objects already expose a flat buffer.
    if hasattr(g, 'buf'):
        return g.buf
    # Plain mappings are wrapped so their values share one buffer.
    if hasattr(g, 'keys'):
        return gvar.BufferDict(g).buf
    # Anything else is treated as array data and flattened.
    return np.reshape(g, -1)
Exemplo n.º 20
0
 def test_cov(g):
     # Check that evalcov_blocks reassembles the full covariance matrix.
     if hasattr(g, 'keys'):
         g = gvar.BufferDict(g)
     g = g.flat[:]
     cov = np.zeros((len(g), len(g)), dtype=float)
     # each block covers one group of mutually correlated indices
     for idx, bcov in ef.evalcov_blocks(g):
         cov[idx[:, None], idx] = bcov
     assert str(gvar.evalcov(g)) == str(cov)
Exemplo n.º 21
0
def make_prior_all_twostate():
    """Prior for the two-state fit: log-normal masses, unit-width amplitudes."""
    prior = gv.BufferDict()
    # ground- and excited-state masses, kept positive via log parameters
    prior["log(Mg1)"] = gv.log(gv.gvar(0.24, 0.2))
    prior["log(Mg2)"] = gv.log(gv.gvar(0.4, 0.2))
    # overlap amplitudes
    for key in ("Zg1", "Zg2"):
        prior[key] = gv.gvar(1, 1)

    return prior
Exemplo n.º 22
0
 def test_buildprior_marginalized(self):
     " MultiFitter.buildprior with marginalization"
     # Copy the reference prior and add an entry no model uses.
     extended = gv.BufferDict(self.prior)
     extended['dummy'] = gv.gvar('12(12)')
     fitter = MultiFitter(models=self.make_models(ncg=1), mopt=True)
     built = fitter.buildprior(prior=extended, mopt=fitter.mopt)
     # With marginalization 'b' is absorbed into the data, so it is
     # dropped from the prior (as is the unused 'dummy' entry).
     del self.prior['b']
     self.assertEqual(str(built), str(self.prior))
Exemplo n.º 23
0
def make_prior():
    """Build the fit prior; adds a w parameter unless LSQFIT_ONLY is set."""
    prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)']))
    if not LSQFIT_ONLY:
        # erfinv(2w-1) with a 1/sqrt(2) width makes w uniform on (0, 1);
        # MULTI_W switches between one w and an array of 19.
        raw = gv.gvar(19 * ['0(1)']) if MULTI_W else gv.gvar('0(1)')
        prior['erfinv(2w-1)'] = raw / 2**0.5
    return prior
Exemplo n.º 24
0
 def test_extend(self):
     " MultiFitter.lsqfit(..., extend=True) "
     # Fit with a log-normal parametrization of 'a'.
     fitter = MultiFitter(models=self.make_models(ncg=1))
     logprior = gv.BufferDict([('log(a)', gv.log(self.prior['a'])),
                               ('b', self.prior['b'])])
     fit = fitter.lsqfit(data=self.data, prior=logprior, extend=True)
     # extend=True exposes 'a' alongside 'log(a)' in the result.
     self.assertEqual(str(fit.p['a']), str(self.ref_fit.p['a']))
     self.assertEqual(gv.fmt_chi2(fit), gv.fmt_chi2(self.ref_fit))
     self.assertTrue('log(a)' in fit.p)
Exemplo n.º 25
0
def make_prior():
    """Build the fit prior; adds a uniform w parameter unless LSQFIT_ONLY."""
    prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)']))
    if not LSQFIT_ONLY:
        # 'unif(w)' maps w onto a uniform distribution on (0, 1);
        # MULTI_W switches between one w and an array of 19.
        if MULTI_W:
            prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1., shape=19)
        else:
            prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1.)
    return prior
Exemplo n.º 26
0
    def optimize_prior(self, empbayes_grouping='order'):
        """Collect empirical-Bayes optimized priors for the w0 and t0 fits."""
        optimized = {}
        for observable in ('w0', 't0'):
            # prior from the (cached) empirical-Bayes fit for this observable
            source = self.fitter[observable]._make_empbayes_fit(
                empbayes_grouping).prior
            # keep only the keys this observable's fit actually uses
            optimized[observable] = gv.BufferDict()
            for key in self.fit_keys[observable]:
                optimized[observable][key] = source[key]

        return optimized
Exemplo n.º 27
0
 def test_extend(self):
     " MultiFitter.lsqfit(...) "
     # Fit with a log-normal parametrization of 'a'.
     fitter = MultiFitter(models=self.make_models(ncg=1))
     prior = gv.BufferDict([('log(a)', gv.log(self.prior['a'])),
                            ('b', self.prior['b'])])
     fit5 = fitter.lsqfit(data=self.data, prior=prior)
     self.assertTrue(self.agree_ref(fit5.p))
     # bug fix: the original passed the (almost always nonzero, hence
     # truthy) ratio straight to assertTrue, so the check could never
     # fail; actually require chi2 to agree with the reference within 10%
     self.assertTrue(
         abs(fit5.chi2 - self.ref_fit.chi2) < 0.1 * self.ref_fit.chi2)
     self.assertTrue('log(a)' in fit5.p)
Exemplo n.º 28
0
 def test_copy(self):
     # copy.copy and copy.deepcopy of a BufferDict must preserve the
     # gvars (gv.equivalent) while giving an independent buffer.
     global b,bkeys,bvalues,bslices,bbuf
     b = gv.BufferDict(b, buf=b.buf * gv.gvar('2(1)'))
     c = copy.copy(b)
     self.assertTrue(gv.equivalent(b, c))
     # modifying the copy must not touch the original
     c['vector'] *= -1 
     self.assertEqual(c['vector'].tolist(), (-b['vector']).tolist())
     c = copy.deepcopy(b)
     self.assertTrue(gv.equivalent(b, c))
     c['vector'] *= -1 
     self.assertEqual(c['vector'].tolist(), (-b['vector']).tolist())
Exemplo n.º 29
0
def test_data():
    """ empbayes_fit accepts equivalent data formats; all must give the same fit. """

    hp = gvar.BufferDict({'log(sdev)': gvar.log(gvar.gvar(1, 1))})
    x = np.linspace(0, 5, 10)

    def gpfactory(hp):
        # BufferDict exposes 'sdev' for the 'log(sdev)' entry
        gp = lgp.GP(lgp.ExpQuad() * hp['sdev']**2)
        gp.addx(x, 'x')
        return gp

    # synthetic data generated from a sampled "true" hyperparameter
    truehp = gvar.sample(hp)
    truegp = gpfactory(truehp)
    trueprior = truegp.prior()

    def makeerr(bd, err):
        # BufferDict with bd's layout where every entry equals err
        return gvar.BufferDict(bd, buf=np.full_like(bd.buf, err))

    data_noerr = gvar.sample(trueprior)
    error = makeerr(data_noerr, 0.1)
    zeroerror = makeerr(data_noerr, 0)
    zerocov = gvar.evalcov(gvar.gvar(data_noerr, zeroerror))
    data_err = gvar.make_fake_data(gvar.gvar(data_noerr, error))

    # each inner list groups interchangeable formats: plain values,
    # gvar-wrapped, (data,)/(data, cov) tuples, plus the same formats
    # again as callables of the hyperparameters
    datas = [
        [
            data_noerr,
            gvar.gvar(data_noerr),
            (data_noerr, ),
            (data_noerr, zerocov),
            lambda _: data_noerr,
            lambda _: gvar.gvar(data_noerr),
            lambda _: (data_noerr, ),
            lambda _: (data_noerr, zerocov),
        ],
        [
            data_err,
            (data_err, ),
            (gvar.mean(data_err), gvar.evalcov(data_err)),
            lambda _: data_err,
            lambda _: (data_err, ),
            lambda _: (gvar.mean(data_err), gvar.evalcov(data_err)),
        ],
    ]

    for datasets in datas:
        fits = []
        for data in datasets:
            fit = lgp.empbayes_fit(hp, gpfactory, data)
            fits.append(fit)

        # all formats in a group must land on the same optimum
        p = fits[0].minresult.x
        for fit in fits[1:]:
            np.testing.assert_allclose(fit.minresult.x, p, atol=1e-6)
Exemplo n.º 30
0
 def buildprior(self, prior, mopt=None, extend=False):
     """Select the prior entries this model needs.

     Without marginalization (``mopt is None``) both parameters are
     kept; otherwise only ``self.a`` survives.  ``mopt`` is stashed on
     the instance so fitfcn can use it to marginalize.
     """
     wanted = [self.a, self.b] if mopt is None else [self.a]
     trimmed = gv.BufferDict()
     for name in wanted:
         actual = gv.dictkey(prior, name)
         trimmed[actual] = prior[actual]
     self.mopt = mopt
     # use self.mopt to marginalize fitfcn
     return trimmed