Example No. 1
    def test_ravgdict_wgtd(self):
        " weighted RAvgDict "
        # scalar
        mean_s = np.random.uniform(-10., 10.)
        xbig_s = gv.gvar(mean_s, 1.)
        xsmall_s = gv.gvar(mean_s, 0.1)
        # array
        mean_a = np.random.uniform(-10., 10., (2, ))
        cov_a = np.array([[1., 0.5], [0.5, 2.]])
        invcov = np.linalg.inv(cov_a)
        N = 30
        xbig_a = gv.gvar(mean_a, cov_a)
        rbig_a = gv.raniter(xbig_a, N)
        xsmall_a = gv.gvar(mean_a, cov_a / 10.)
        rsmall_a = gv.raniter(xsmall_a, N)

        ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]))
        for rb, rw in zip(rbig_a, rsmall_a):
            ravg.add(
                dict(scalar=gv.gvar(xbig_s(), 1.), array=[gv.gvar(rb, cov_a)]))
            ravg.add(
                dict(scalar=gv.gvar(xsmall_s(), 0.1),
                     array=[gv.gvar(rw, cov_a / 10.)]))
        np_assert_allclose(
            ravg['scalar'].sdev,
            1 / (N * (1. / xbig_s.var + 1. / xsmall_s.var))**0.5)
        self.assertLess(abs(ravg['scalar'].mean - mean_s),
                        5 * ravg['scalar'].sdev)
        np_assert_allclose(gv.evalcov(ravg['array'].flat),
                           cov_a / (10. + 1.) / N)
        for i in range(2):
            self.assertLess(abs(mean_a[i] - ravg['array'][0, i].mean),
                            5 * ravg['array'][0, i].sdev)
        self.assertEqual(ravg.dof, 4 * N - 2 + 2 * N - 1)
        self.assertGreater(ravg.Q, 0.5e-3)
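
A minimal sketch of the sampling pattern these RAvg tests rely on (assuming only the gvar and numpy packages): gv.raniter(g, n) yields n random arrays drawn from the multivariate Gaussian distribution described by g, so the sample mean and covariance should approach g's mean and covariance.

import numpy as np
import gvar as gv

mean = np.array([1.0, 2.0])
cov = np.array([[1.0, 0.5], [0.5, 2.0]])
g = gv.gvar(mean, cov)                            # correlated Gaussian variables
samples = np.array([s for s in gv.raniter(g, 1000)])
print(samples.mean(axis=0))                       # close to mean
print(np.cov(samples, rowvar=False))              # close to cov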
Example No. 2
 def setUp(self):
     ## prior 
     self.prior = gv.BufferDict()
     nt = NTERM
     self.prior['a'] = gv.gvar(nt*["0.50(1)"])
     self.prior['ao'] = gv.gvar(nt*["0.250(5)"])
     self.prior['logb'] = gv.log(gv.gvar(nt*["0.60(1)"]))
     self.prior['bo'] = gv.gvar(nt*["0.30(1)"])
     self.prior['logdEa'] = gv.log(gv.gvar(nt*["0.50(1)"]))
     self.prior['logdEao'] = gv.log(gv.gvar(nt*["0.60(1)"]))
     self.prior['logdEb'] = gv.log(gv.gvar(nt*["0.45(1)"]))
     self.prior['logdEbo'] = gv.log(gv.gvar(nt*["0.65(1)"]))
     self.prior['Vnn'] = gv.gvar(nt*[nt*["2.00(1)"]])
     self.prior['Vno'] = gv.gvar(nt*[nt*["1.00(1)"]])
     self.prior['Von'] = gv.gvar(nt*[nt*["1.00(1)"]])
     self.prior['Voo'] = gv.gvar(nt*[nt*["2.00(1)"]])
     nsym = int(nt*(nt+1)/2)
     self.prior['Vnn_sym'] = gv.gvar(nsym*["2.00(1)"])
     self.prior['Voo_sym'] = gv.gvar(nsym*["2.00(1)"])
     
     ## actual parameters, time ranges, corr counter 
     self.p = next(gv.raniter(self.prior))
     for x in ['b', 'dEa', 'dEao', 'dEb', 'dEbo']:
         self.p[x] = gv.exp(self.p['log' + x])
     self.T = 18.
     self.tdata = np.arange(self.T)
     self.tfit = self.tdata[1:]
     self.ncorr = 0
     
     self.ran = gv.gvar(0, 1)
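
A hedged sketch of the log-prior idiom used in this setUp (gvar only): storing log(b) in the prior keeps b positive, and a random draw of the prior is exponentiated back, exactly as the loop over 'log' + x does above.

import gvar as gv

prior = gv.BufferDict()
prior['logb'] = gv.log(gv.gvar(3 * ["0.60(1)"]))   # prior on log(b), so b > 0
p = next(gv.raniter(prior))                        # one random parameter set
b = gv.exp(p['logb'])                              # recover b itself
print(b)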
Example No. 3
 def test_simulation(self):
     """ CorrFitter.simulated_data_iter """
     models = [ self.mkcorr(a="a", b="a", dE="dE", tp=None) ]
     fitter = self.dofit(models)
     data = self.data
     diter = gv.BufferDict()
     k = list(data.keys())[0]
     # make n config dataset corresponding to data
     n = 100
     diter = gv.raniter(
         g = gv.gvar(gv.mean(self.data[k]), gv.evalcov(self.data[k]) * n),
         n = n
         )
     dataset = gv.dataset.Dataset()
     for d in diter:
         dataset.append(k, d)
     pexact = fitter.fit.pmean
     covexact = gv.evalcov(gv.dataset.avg_data(dataset)[k])
     for sdata in fitter.simulated_data_iter(n=2, dataset=dataset):
         sfit = fitter.lsqfit(
             data=sdata, prior=self.prior, p0=pexact, print_fit=False
             )
         diff = dict()
         for i in ['a', 'logdE']:
             diff[i] = sfit.p[i][0] - pexact[i][0]
         c2 = gv.chi2(diff)
         self.assertLess(c2/c2.dof, 15.)
         self.assert_arraysclose(gv.evalcov(sdata[k]), covexact)
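
A small sketch of the dataset-building pattern in this test (standard gvar API, simplified here): random configurations from gv.raniter are appended to a gv.dataset.Dataset and then averaged back into gvars with gv.dataset.avg_data.

import gvar as gv

g = gv.gvar(["1.00(20)", "2.00(20)"])
dataset = gv.dataset.Dataset()
for sample in gv.raniter(g, 50):        # 50 random configurations
    dataset.append('corr', sample)
avg = gv.dataset.avg_data(dataset)
print(avg['corr'])                      # errors shrink roughly like 0.2 / sqrt(50)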
Example No. 4
 def test_ravgdict_unwgtd(self):
     " unweighted RAvgDict "
     # scalar
     mean_s = np.random.uniform(-10., 10.)
     sdev_s = 0.1
     x_s = gv.gvar(mean_s, sdev_s)
     # array
     mean_a = np.random.uniform(-10., 10., (2, ))
     cov_a = np.array([[1., 0.5], [0.5, 2.]]) / 10.
     x_a = gv.gvar(mean_a, cov_a)
     N = 30
     r_a = gv.raniter(x_a, N)
     ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]), weighted=False)
     for ri in r_a:
         ravg.add(
             dict(scalar=gv.gvar(x_s(), sdev_s), array=[gv.gvar(ri,
                                                                cov_a)]))
     np_assert_allclose(ravg['scalar'].sdev, x_s.sdev / (N**0.5))
     self.assertLess(abs(ravg['scalar'].mean - mean_s),
                     5 * ravg['scalar'].sdev)
     np_assert_allclose(gv.evalcov(ravg['array'].flat), cov_a / N)
     for i in range(2):
         self.assertLess(abs(mean_a[i] - ravg['array'][0, i].mean),
                         5 * ravg['array'][0, i].sdev)
     self.assertEqual(ravg.dof, 2 * N - 2 + N - 1)
     self.assertGreater(ravg.Q, 1e-3)
Example No. 5
    def setUp(self):
        ## prior
        self.prior = gv.BufferDict()
        nt = NTERM
        self.prior["a"] = gv.gvar(nt * ["0.50(1)"])
        self.prior["ao"] = gv.gvar(nt * ["0.250(5)"])
        self.prior["log(b)"] = gv.log(gv.gvar(nt * ["0.60(1)"]))
        self.prior["bo"] = gv.gvar(nt * ["0.30(1)"])
        self.prior["log(dEa)"] = gv.log(gv.gvar(nt * ["0.50(1)"]))
        self.prior["log(dEao)"] = gv.log(gv.gvar(nt * ["0.60(1)"]))
        self.prior["log(dEb)"] = gv.log(gv.gvar(nt * ["0.45(1)"]))
        self.prior["log(dEbo)"] = gv.log(gv.gvar(nt * ["0.65(1)"]))
        self.prior["Vnn"] = gv.gvar(nt * [nt * ["2.00(1)"]])
        self.prior["Vno"] = gv.gvar(nt * [nt * ["1.00(1)"]])
        self.prior["Von"] = gv.gvar(nt * [nt * ["1.00(1)"]])
        self.prior["Voo"] = gv.gvar(nt * [nt * ["2.00(1)"]])
        nsym = int(nt * (nt + 1) / 2)
        self.prior["Vnn_sym"] = gv.gvar(nsym * ["2.00(1)"])
        self.prior["Voo_sym"] = gv.gvar(nsym * ["2.00(1)"])

        ## actual parameters, time ranges, corr counter
        self.p = next(gv.raniter(self.prior))
        for x in ["b", "dEa", "dEao", "dEb", "dEbo"]:
            self.p[x] = gv.exp(self.p["log(" + x + ")"])
        self.T = 18.0
        self.tdata = np.arange(self.T)
        self.tfit = self.tdata[1:]
        self.ncorr = 0

        self.ran = gv.gvar(0, 1)
Example No. 6
 def test_ravgdict_unwgtd(self):
     " unweighted RAvgDict "
     # scalar
     mean_s = np.random.uniform(-10., 10.)
     sdev_s = 0.1
     x_s = gv.gvar(mean_s, sdev_s)
     # array
     mean_a = np.random.uniform(-10., 10., (2,))
     cov_a = np.array([[1., 0.5], [0.5, 2.]]) / 10.
     x_a = gv.gvar(mean_a, cov_a)
     N = 30
     r_a = gv.raniter(x_a, N)
     ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]), weighted=False)
     for ri in r_a:
         ravg.add(dict(
             scalar=gv.gvar(x_s(), sdev_s), array=[gv.gvar(ri, cov_a)]
             ))
     np_assert_allclose(ravg['scalar'].sdev, x_s.sdev / (N ** 0.5))
     self.assertLess(
         abs(ravg['scalar'].mean - mean_s),
         5 * ravg['scalar'].sdev
         )
     np_assert_allclose(gv.evalcov(ravg['array'].flat), cov_a / N)
     for i in range(2):
         self.assertLess(
             abs(mean_a[i] - ravg['array'][0, i].mean),
             5 * ravg['array'][0, i].sdev
             )
     self.assertEqual(ravg.dof, 2 * N - 2 + N - 1)
     self.assertGreater(ravg.Q, 1e-3)
Example No. 7
 def test_ravgarray_wgtd(self):
     " weighted RAvgArray "
     if not have_gvar:
         return
     mean = np.random.uniform(-10., 10., (2, ))
     cov = np.array([[1., 0.5], [0.5, 2.]])
     invcov = np.linalg.inv(cov)
     N = 30
     xbig = gv.gvar(mean, cov)
     rbig = gv.raniter(xbig, N)
     xsmall = gv.gvar(mean, cov / 10.)
     rsmall = gv.raniter(xsmall, N)
     ravg = RAvgArray(2)
     for rb, rs in zip(rbig, rsmall):
         ravg.add(gv.gvar(rb, cov))
         ravg.add(gv.gvar(rs, cov / 10.))
     np_assert_allclose(gv.evalcov(ravg), cov / (10. + 1.) / N)
     for i in range(2):
         self.assertLess(abs(mean[i] - ravg[i].mean), 5 * ravg[i].sdev)
     self.assertEqual(ravg.dof, 4 * N - 2)
     self.assertGreater(ravg.Q, 1e-3)
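
Why the expected covariance above is cov / (10. + 1.) / N (a numerical check under the same assumptions as the test): for a weighted average the inverse covariances add, and each iteration contributes one measurement with covariance cov and one with cov / 10.

import numpy as np

cov = np.array([[1.0, 0.5], [0.5, 2.0]])
N = 30
inv_avg = N * (np.linalg.inv(cov) + np.linalg.inv(cov / 10.0))
print(np.allclose(np.linalg.inv(inv_avg), cov / 11.0 / N))   # True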
Example No. 8
 def test_ravgarray_wgtd(self):
     " weighted RAvgArray "
     # if not have_gvar:
     #     return
     mean = np.random.uniform(-10., 10., (2,))
     cov = np.array([[1., 0.5], [0.5, 2.]])
     invcov = np.linalg.inv(cov)
     N = 30
     xbig = gv.gvar(mean, cov)
     rbig = gv.raniter(xbig, N)
     xsmall = gv.gvar(mean, cov / 10.)
     rsmall = gv.raniter(xsmall, N)
     ravg = RAvgArray((1, 2))
     for rb, rs in zip(rbig, rsmall):
         ravg.add([gv.gvar(rb, cov)])
         ravg.add([gv.gvar(rs, cov / 10.)])
     np_assert_allclose(gv.evalcov(ravg.flat), cov / (10. + 1.) / N)
     for i in range(2):
         self.assertLess(abs(mean[i] - ravg[0, i].mean), 5 * ravg[0, i].sdev)
     self.assertEqual(ravg.dof, 4 * N - 2)
     self.assertGreater(ravg.Q, 1e-3)
Example No. 9
    def test_ravgdict_wgtd(self):
        " weighted RAvgDict "
        # scalar
        mean_s = np.random.uniform(-10., 10.)
        xbig_s = gv.gvar(mean_s, 1.)
        xsmall_s = gv.gvar(mean_s, 0.1)
        # array
        mean_a = np.random.uniform(-10., 10., (2,))
        cov_a = np.array([[1., 0.5], [0.5, 2.]])
        invcov = np.linalg.inv(cov_a)
        N = 30
        xbig_a = gv.gvar(mean_a, cov_a)
        rbig_a = gv.raniter(xbig_a, N)
        xsmall_a = gv.gvar(mean_a, cov_a / 10.)
        rsmall_a = gv.raniter(xsmall_a, N)

        ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]))
        for rb, rw in zip(rbig_a, rsmall_a):
            ravg.add(dict(
                scalar=gv.gvar(xbig_s(), 1.), array=[gv.gvar(rb, cov_a)]
                ))
            ravg.add(dict(
                scalar=gv.gvar(xsmall_s(), 0.1),
                array=[gv.gvar(rw, cov_a / 10.)]
                ))
        np_assert_allclose(
            ravg['scalar'].sdev,
            1 / (N * (1. / xbig_s.var + 1. / xsmall_s.var)) ** 0.5
            )
        self.assertLess(
            abs(ravg['scalar'].mean - mean_s), 5 * ravg['scalar'].sdev
            )
        np_assert_allclose(gv.evalcov(ravg['array'].flat), cov_a / (10. + 1.) / N)
        for i in range(2):
            self.assertLess(
                abs(mean_a[i] - ravg['array'][0, i].mean),
                5 * ravg['array'][0, i].sdev
                )
        self.assertEqual(ravg.dof, 4 * N - 2 + 2 * N - 1)
        self.assertGreater(ravg.Q, 1e-3)
Example No. 10
 def test_ravgarray_unwgtd(self):
     " unweighted RAvgArray "
     # if not have_gvar:
     #     return
     mean = np.random.uniform(-10., 10., (2,))
     cov = np.array([[1., 0.5], [0.5, 2.]]) / 10.
     N = 30
     x = gv.gvar(mean, cov)
     r = gv.raniter(x, N)
     ravg = RAvgArray((1, 2), weighted=False)
     for ri in r:
         ravg.add([gv.gvar(ri, cov)])
     np_assert_allclose(gv.evalcov(ravg.flat), cov / N)
     for i in range(2):
         self.assertLess(abs(mean[i] - ravg[0, i].mean), 5 * ravg[0, i].sdev)
     self.assertEqual(ravg.dof, 2 * N - 2)
     self.assertGreater(ravg.Q, 1e-3)
Example No. 11
    def setUp(self):
        ## prior
        self.prior = gv.BufferDict()
        nt = NTERM
        self.prior["a"] = gv.gvar(nt * ["0.50(1)"])
        self.prior["ao"] = gv.gvar(nt * ["0.250(5)"])
        self.prior["log(b)"] = gv.log(gv.gvar(nt * ["0.60(1)"]))
        self.prior["bo"] = gv.gvar(nt * ["0.30(1)"])
        self.prior["log(dE)"] = gv.log(gv.gvar(nt * ["0.50(1)"]))
        self.prior["log(dEo)"] = gv.log(gv.gvar(nt * ["0.60(1)"]))

        ## actual parameters, time ranges, corr counter
        self.p = gv.ExtendedDict(next(gv.raniter(self.prior)))
        self.tp = 10.0
        self.tdata = np.arange(self.tp)
        self.tfit = self.tdata[1:]
        self.ncorr = 0

        self.ran = gv.gvar(0, 1)
Example No. 12
 def setUp(self):
     ## prior 
     self.prior = gv.BufferDict()
     nt = NTERM
     self.prior['a'] = gv.gvar(nt*["0.50(1)"])
     self.prior['ao'] = gv.gvar(nt*["0.250(5)"])
     self.prior['logb'] = gv.log(gv.gvar(nt*["0.60(1)"]))
     self.prior['bo'] = gv.gvar(nt*["0.30(1)"])
     self.prior['logdE'] = gv.log(gv.gvar(nt*["0.50(1)"]))
     self.prior['logdEo'] = gv.log(gv.gvar(nt*["0.60(1)"]))
     
     ## actual parameters, time ranges, corr counter 
     self.p = next(gv.raniter(self.prior))
     for x in ['b', 'dE', 'dEo']:
         self.p[x] = gv.exp(self.p['log' + x])
     self.tp = 10.
     self.tdata = np.arange(self.tp)
     self.tfit = self.tdata[1:]
     self.ncorr = 0
     
     self.ran = gv.gvar(0, 1)
Example No. 13
 def samples(self, n):
     dim = self.r.shape[1]
     H = linalg.hilbert(dim) / (2 * self.s**2)
     x = gv.gvar(self.r[0], H)
     print(gv.evalcorr(x))
     return np.array([rx for rx in gv.raniter(x, n)])
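
A brief sketch of what gv.evalcorr reports in the samples() method above (gvar only): it converts the covariance of a set of gvars into the corresponding correlation matrix.

import gvar as gv

x = gv.gvar([1.0, 2.0], [[1.0, 0.5], [0.5, 2.0]])
print(gv.evalcorr(x))   # unit diagonal; off-diagonal 0.5 / sqrt(1. * 2.)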
Example No. 14
    def test_svd_diagnosis(self):
        " svd_diagnosis "
        # random correlated data (10x10 correlation matrix)
        chebval = np.polynomial.chebyshev.chebval
        gv.ranseed(1)
        x = np.linspace(-.9, .9, 10)
        c = gv.raniter(gv.gvar(len(x) * ['0(1)']))

        # small dataset (big svdcut)
        dset = []
        for n in range(15):
            dset.append(chebval(x, next(c)))
        gv.ranseed(2)
        s = gv.dataset.svd_diagnosis(dset)
        self.assertGreater(s.svdcut, 0.01)
        # print(s.svdcut)
        # s.plot_ratio(show=True)
        # test with dictionary
        gv.ranseed(2)
        sd = gv.dataset.svd_diagnosis(dict(a=dset))
        self.assertEqual(s.svdcut, sd.svdcut)

        # large dataset (small or no svdcut)
        dset = []
        for n in range(100):
            dset.append(chebval(x, next(c)))
        gv.ranseed(3)
        s = svd_diagnosis(dset)
        self.assertGreater(0.01, s.svdcut)
        # print(s.svdcut)
        # s.plot_ratio(show=True)

        # with models (only if lsqfit installed)
        if lsqfit is None:
            return

        class Linear(lsqfit.MultiFitterModel):
            def __init__(self, datatag, x, intercept, slope):
                super(Linear, self).__init__(datatag)
                self.x = np.array(x)
                self.intercept = intercept
                self.slope = slope

            def fitfcn(self, p):
                return p[self.intercept] + p[self.slope] * self.x

            def buildprior(self, prior, mopt=None):
                " Extract the model's parameters from prior. "
                newprior = {}
                newprior[self.intercept] = prior[self.intercept]
                newprior[self.slope] = prior[self.slope]
                return newprior

            def builddata(self, data):
                " Extract the model's fit data from data. "
                return data[self.datatag]

            def builddataset(self, dset):
                " Extract the model's fit data from a dataset. "
                return dset[self.datatag]

        x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
        y_samples = [
            [
                2.8409, 4.8393, 6.8403, 8.8377, 10.8356, 12.8389, 14.8356,
                16.8362, 18.8351, 20.8341
            ],
            [
                2.8639, 4.8612, 6.8597, 8.8559, 10.8537, 12.8525, 14.8498,
                16.8487, 18.8460, 20.8447
            ],
            [
                3.1048, 5.1072, 7.1071, 9.1076, 11.1090, 13.1107, 15.1113,
                17.1134, 19.1145, 21.1163
            ],
            [
                3.0710, 5.0696, 7.0708, 9.0705, 11.0694, 13.0681, 15.0693,
                17.0695, 19.0667, 21.0678
            ],
            [
                3.0241, 5.0223, 7.0198, 9.0204, 11.0191, 13.0193, 15.0198,
                17.0163, 19.0154, 21.0155
            ],
            [
                2.9719, 4.9700, 6.9709, 8.9706, 10.9707, 12.9705, 14.9699,
                16.9686, 18.9676, 20.9686
            ],
            [
                3.0688, 5.0709, 7.0724, 9.0730, 11.0749, 13.0776, 15.0790,
                17.0800, 19.0794, 21.0795
            ],
            [
                3.1471, 5.1468, 7.1452, 9.1451, 11.1429, 13.1445, 15.1450,
                17.1435, 19.1425, 21.1432
            ],
            [
                3.0233, 5.0233, 7.0225, 9.0224, 11.0225, 13.0216, 15.0224,
                17.0217, 19.0208, 21.0222
            ],
            [
                2.8797, 4.8792, 6.8803, 8.8794, 10.8800, 12.8797, 14.8801,
                16.8797, 18.8803, 20.8812
            ],
            [
                3.0388, 5.0407, 7.0409, 9.0439, 11.0443, 13.0459, 15.0455,
                17.0479, 19.0493, 21.0505
            ],
            [
                3.1353, 5.1368, 7.1376, 9.1367, 11.1360, 13.1377, 15.1369,
                17.1400, 19.1384, 21.1396
            ],
            [
                3.0051, 5.0063, 7.0022, 9.0052, 11.0040, 13.0033, 15.0007,
                16.9989, 18.9994, 20.9995
            ],
            [
                3.0221, 5.0197, 7.0193, 9.0183, 11.0179, 13.0184, 15.0164,
                17.0177, 19.0159, 21.0155
            ],
            [
                3.0188, 5.0200, 7.0184, 9.0183, 11.0189, 13.0188, 15.0191,
                17.0183, 19.0177, 21.0186
            ],
        ]
        dset = dict(y=y_samples)
        model = Linear('y', x, intercept='y0', slope='s')
        prior = gv.gvar(dict(y0='1(1)', s='2(2)'))
        gv.ranseed(4)
        s = svd_diagnosis(dset, models=[model])
        self.assertGreater(s.nmod, 0)
        self.assertGreater(s.svdcut, s.val[s.nmod - 1] / s.val[-1])
        self.assertGreater(s.val[s.nmod] / s.val[-1], s.svdcut)
        return
        # skip rest
        fitter = lsqfit.MultiFitter(models=[model])
        fit = fitter.lsqfit(prior=prior, svdcut=s.svdcut, data=s.avgdata)
        print(fit)
        # s.avgdata = gv.gvar(gv.mean(s.avgdata), gv.sdev(s.avgdata))
        fit = fitter.lsqfit(prior=prior, data=s.avgdata)
        print(fit)
        s.plot_ratio(show=True)
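
A minimal sketch of the svd_diagnosis workflow exercised above (only the documented gvar.dataset interface, with made-up sample sizes): build a small dataset, let svd_diagnosis estimate an SVD cut, and use its averaged data.

import gvar as gv

g = gv.gvar(10 * ['0(1)'])
c = gv.raniter(g)
dset = [next(c) for _ in range(15)]     # small dataset: expect a sizeable cut
s = gv.dataset.svd_diagnosis(dset)
print(s.svdcut)                         # suggested SVD cut
avg = s.avgdata                         # data averaged from the samples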
Example No. 15
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
            table.append(lines + line + '\n')
    return '\n'.join(table)


##

if __name__ == '__main__':
    import gvar
    gvar.ranseed((1950, 1))
    r1 = gvar.gvar(8., 1.)
    r2 = gvar.gvar([-10., -9.], [2., 3.])
    r3 = gvar.gvar([[0., 1.], [2., 3.]], [[1., 2.], [3., 4.]])
    r3_iter = gvar.raniter(r3)
    r2_iter = gvar.raniter(r2)
    N = 1001
    d = Dataset(bstrap=False)
    for x in range(N):
        d.append('x', next(r3_iter))
    for x in range(N):
        d.append('y', next(r2_iter))
    d2 = Dataset(bstrap=False)
    for x in range(N):
        d2.append('z', r1())
    d.copy(d2)
    med = d.gdev()
    for k in med:
        print(k, med[k])
    avg = d.avg()
Example No. 16
xpred = np.linspace(-15, 25, 200)
y = np.sin(xdata)

print('make GP...')
gp = lgp.GP(lgp.ExpQuad(scale=3))
gp.addx(xdata, 'data')
gp.addx(xpred, 'pred')
gp.addx(xpred, 'deriv', 1)

print('fit...')
u = gp.pred({'data': y}, ['pred', 'deriv'], fromdata=True)

print('figure...')
fig, ax = plt.subplots(num='b', clear=True)

colors = dict()
for label in u:
    m = gvar.mean(u[label])
    s = gvar.sdev(u[label])
    patch = ax.fill_between(xpred, m - s, m + s, label=label, alpha=0.5)
    colors[label] = patch.get_facecolor()[0]

print('samples...')
for sample in gvar.raniter(u, 1):
    for label in u:
        ax.plot(xpred, sample[label], '-', color=colors[label])
ax.plot(xdata, y, 'k.', label='data')
ax.legend(loc='best')

fig.show()
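
A short sketch of the dictionary form of gvar.raniter used for the samples above: when given a dictionary of gvars, each iteration yields a BufferDict of mutually consistent random draws.

import gvar

d = gvar.gvar(dict(a='1.0(5)', b='2.0(5)'))
for sample in gvar.raniter(d, 2):
    print(sample['a'], sample['b'])   # plain numbers, one consistent draw per pass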
Example No. 17
kernel = lgp.where(lambda comp: comp == 'short', kshort, klong, dim='comp')
gp = lgp.GP(kernel)


def addcomps(key, time):
    gp.addx(makex(time, 'short'), key + 'short')
    gp.addx(makex(time, 'long'), key + 'long')
    gp.addtransf({key + 'short': 0.3, key + 'long': 1}, key)


addcomps('data', time)
addcomps('pred', time_pred)

print('generate data...')
prior = gp.prior(['data', 'datashort', 'datalong'])
data = next(gvar.raniter(prior))

print('prediction...')
pred = gp.predfromdata({'data': data['data']},
                       ['pred', 'predshort', 'predlong'])

print('sample posterior...')
mean = gvar.mean(pred)
sdev = gvar.sdev(pred)
samples = list(gvar.raniter(pred, 1))

print('figure...')
fig, axs = plt.subplots(3, 1, num='w', clear=True, figsize=[6, 7])

for ax, comp in zip(axs, ['', 'short', 'long']):
    key = 'pred' + comp
Example No. 18
gp = lgp.GP(lgp.Zeta(nu=2.5),
            checkpos=False)  # TODO is this checkpos necessary
gp.addkernelop('fourier', True, 'F')
x = np.linspace(0, 1, 100)
gp.addx(x, 'x')
gp.addx(1, 's1', proc='F')
gp.addx(2, 'c1', proc='F')

comb = [
    [0, 0],
    [1, 0],
    [0, 1],
    [1, 1],
]

fig, ax = plt.subplots(num='fourier', clear=True)

for s, c in comb:
    y = gp.predfromdata(dict(s1=s, c1=c), 'x')
    m = gvar.mean(y)
    u = gvar.sdev(y)
    pc = ax.fill_between(x, m - u, m + u, alpha=0.5, label=f's{s}c{c}')
    color = pc.get_facecolor()
    for sample in gvar.raniter(y, 3):
        ax.plot(x, sample, color=color)

ax.legend()

fig.show()
Example No. 19
gp.addx(xdata, 'xdata', proc='f')
gp.addtransf({'xdata': M}, 'data', axes=2)

gp.addx(xinteg, 'xinteg', proc='primitive of f')
gp.addtransf({'xinteg': suminteg}, 'suminteg', axes=2)

gp.addx(xinteg, 'xintegx', proc='primitive of xf(x)')
gp.addtransf({'xintegx': suminteg}, 'sumintegx', axes=2)

#### GENERATE FAKE DATA ####

prior = gp.predfromdata({
    'suminteg': 1,
    'sumintegx': 1,
}, ['data', 'xdata'])
priorsample = next(gvar.raniter(prior))

datamean = priorsample['data']
dataerr = np.full_like(datamean, 1)
datamean = datamean + dataerr * np.random.randn(*dataerr.shape)
data = gvar.gvar(datamean, dataerr)

# check the integral is one with trapezoid rule
x = xdata['x']
y = priorsample['xdata']
checksum = np.sum((y[:, 1:] + y[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx   f_i(x) =', checksum)
checksum = np.sum(((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx x f_i(x) =', checksum)

#### FIT ####
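
The fake-data recipe above, reduced to a hedged standalone sketch (array sizes are made up): draw one sample from a prior of gvars, add Gaussian noise of a chosen size, and wrap the result back into gvars so it behaves like measured data.

import numpy as np
import gvar

prior = gvar.gvar(5 * ['0(1)'])
sample = next(gvar.raniter(prior))                 # one random realization
err = np.full_like(sample, 0.1)
noisy = sample + err * np.random.randn(*err.shape)
data = gvar.gvar(noisy, err)                       # fake measurements with errors
print(data)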
Example No. 20
print('fit...')
u = gp.predfromdata({
    'data': y[0::2],
    'dataderiv': y[1::2]
}, ['pred', 'predderiv'])

print('figure...')
fig, ax = plt.subplots(num='e', clear=True)

colors = dict()
for label in u:
    m = gvar.mean(u[label])
    s = gvar.sdev(u[label])
    patch = ax.fill_between(xpred, m - s, m + s, label=label, alpha=0.5)
    colors[label] = patch.get_facecolor()[0]

print('samples...')
for sample in gvar.raniter(u, 30):
    for label in u:
        ax.plot(xpred, sample[label], '-', color=colors[label])

for deriv, marker in (0, '+'), (1, 'x'):
    ax.plot(xdata[deriv::2],
            y[deriv::2],
            f'k{marker}',
            label=f'data deriv {deriv}')
ax.legend(loc='best')

fig.show()
Example No. 21
                    lines = lines + line + '\n'
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
            table.append(lines + line +'\n')
    return '\n'.join(table)
##    
    

if __name__ == '__main__':
    import gvar
    gvar.ranseed((1950, 1))
    r1 = gvar.gvar(8., 1.)
    r2 = gvar.gvar([-10., -9.], [2., 3.])
    r3 = gvar.gvar([[0., 1.], [2., 3.]], [[1., 2.], [3., 4.]])
    r3_iter = gvar.raniter(r3)
    r2_iter = gvar.raniter(r2)
    N = 1001
    d = Dataset(bstrap=False)
    for x in range(N):
        d.append('x', next(r3_iter))
    for x in range(N):
        d.append('y', next(r2_iter))
    d2 = Dataset(bstrap=False)
    for x in range(N):
        d2.append('z', r1())
    d.copy(d2)
    med = d.gdev()
    for k in med:
        print(k, med[k])
    avg = d.avg()
Example No. 22
    gp.addx(xinteg, 'xmomrule', proc='primitive of xf(x)')
    gp.addtransf({'xmomrule': suminteg}, 'momrule', axes=2)
    
    # quark sum rules
    for quark in 'ducs':
        idx = indices[quark]
        label = f'{quark}{quark}bar' # the one appearing in `constraints`
        xlabel = f'x{label}'
        gp.addx(xinteg[idx], xlabel, proc='primitive')
        gp.addtransf({xlabel: suminteg[idx] * qdiff}, label, axes=2)

    return gp

#### GENERATE FAKE DATA ####

hpsample = next(gvar.raniter(hyperprior))
gp = makegp(hpsample)
prior = gp.predfromdata(constraints, ['data', 'xdata'])
priorsample = next(gvar.raniter(prior))

datamean = priorsample['data']
dataerr = np.full_like(datamean, 1)
datamean = datamean + dataerr * np.random.randn(*dataerr.shape)
data = gvar.gvar(datamean, dataerr)

# check sum rules approximately with trapezoid rule
def check_integrals(x, y):
    checksum = np.sum(((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))
    print('sum_i int dx x f_i(x) =', checksum)
    for q in 'ducs':
        idx = indices[q]
Example No. 23
pred = gp.predfromdata(datadict, [0, 1])

fig, ax = plt.subplots(num='u', clear=True)

colors = dict()
for deriv in pred:
    m = gvar.mean(pred[deriv])
    s = gvar.sdev(pred[deriv])
    polys = ax.fill_between(time_pred,
                            m - s,
                            m + s,
                            alpha=0.5,
                            label=f'deriv {deriv}')
    colors[deriv] = polys.get_facecolor()[0]

for sample in gvar.raniter(pred, 3):
    for deriv in pred:
        ax.plot(time_pred, sample[deriv], color=colors[deriv])

ax.errorbar(time,
            gvar.mean(data),
            yerr=gvar.sdev(data),
            fmt='.',
            color=colors[data_deriv],
            alpha=1,
            label='data')

ax.legend(loc='best')
ax.set_xlabel('time')

fig.show()
Example No. 24
def fcn(params):

    xdata = params['xdata']
    # data2 = np.einsum('dfxy,fx,fy->d', M2, xdata, xdata)
    xdata2 = xdata[:, None, :nx2] * xdata[:, :nx2, None]
    data2 = np.tensordot(M2, xdata2, axes=3)

    return dict(data=params['data'], data2=data2)


params_prior = gp.predfromdata(constraints, ['xdata', 'data'])

#### GENERATE FAKE DATA ####

priorsample = next(gvar.raniter(params_prior))
fcnsample = fcn(priorsample)

datamean = gvar.BufferDict({
    'data': fcnsample['data'],
    'data2': fcnsample['data2'],
})
dataerr = gvar.BufferDict({
    'data': np.full(ndata, 1),
    'data2': np.full(ndata2, 100),
})
datamean.buf += dataerr.buf * np.random.randn(*dataerr.buf.shape)
data = gvar.gvar(datamean, dataerr)


# check sum rules approximately with trapezoid rule
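
A sketch of the BufferDict trick used just above (gvar only): every entry of a gvar.BufferDict lives in one flat .buf array, so noise can be added to all fields in a single vectorized statement.

import numpy as np
import gvar

b = gvar.BufferDict({'data': np.zeros(3), 'data2': np.zeros(2)})
b.buf += np.arange(5.0)          # updates both entries at once
print(b['data'], b['data2'])     # [0. 1. 2.] [3. 4.]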
Example No. 25
def main():
    gv.ranseed(SEED)
    y = exact(NSAMPLE)
    ysamples = [yi for yi in gv.raniter(y, n=NSAMPLE)]
    # above code (don't comment it out) generates the following
    ysamples = [
        [0.0092441016, 0.0068974057, 0.0051480509, 0.0038431422, 0.0028690492],
        [0.0092477405, 0.0069030565, 0.0051531383, 0.0038455855, 0.0028700587],
        [0.0092558569, 0.0069102437, 0.0051596569, 0.0038514537, 0.0028749153],
        [0.0092294581, 0.0068865156, 0.0051395262, 0.003835656, 0.0028630454],
        [0.009240534, 0.0068961523, 0.0051480046, 0.0038424661, 0.0028675632],
    ]
    dstr = '['
    for yi in ysamples:
        dstr += ('[' + len(yi) * '{:10.8g},' + '],').format(*yi)
    dstr += ']'
    ysamples = eval(dstr)
    print(np.array(ysamples).tolist())
    s = gv.dataset.svd_diagnosis(ysamples)
    # s.plot_ratio(show=True)
    y = s.avgdata
    x = np.array([15., 16., 17., 18., 19.])

    def f(p):
        return p['a'] * gv.exp(-p['b'] * x)

    prior = gv.gvar(dict(a='0.75(5)', b='0.30(3)'))
    sys_stdout = sys.stdout

    sys.stdout = tee.tee(sys_stdout, open('eg10a.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=0.0)
    print(fit)

    sys.stdout = tee.tee(sys_stdout, open('eg10b.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=s.svdcut)
    print(fit)

    sys.stdout = tee.tee(sys_stdout, open('eg10c.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y,
                               fcn=f,
                               prior=prior,
                               svdcut=s.svdcut,
                               add_svdnoise=True)
    print(fit)

    sys.stdout = tee.tee(sys_stdout, open('eg10d.out', 'w'))
    yex = gv.gvar(gv.mean(y), gv.evalcov(exact(1.)))
    fit = lsqfit.nonlinear_fit(data=yex, fcn=f, prior=prior, svdcut=0)
    print(fit)
    # fit.plot_residuals().show()

    sys.stdout = tee.tee(sys_stdout, open('eg10e.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=s.svdcut)
    print(fit)
    print('\n================ Add noise to prior, SVD')
    noisyfit = lsqfit.nonlinear_fit(data=y,
                                    prior=prior,
                                    fcn=f,
                                    svdcut=s.svdcut,
                                    add_svdnoise=True,
                                    add_priornoise=True)
    print(noisyfit.format(True))
    # save figures
    fit.qqplot_residuals(plot=plt).savefig('eg10e1.png', bbox_inches='tight')
    plt.cla()
    noisyfit.qqplot_residuals(plot=plt).savefig('eg10e2.png',
                                                bbox_inches='tight')