Example #1
def solve(a, b):
    """ Find ``x`` such that ``a.dot(x) = b`` for matrix ``a``. 

    Args:
        a: Two-dimensional, square matrix/array of numbers 
            and/or :class:`gvar.GVar`\s.
        b: One-dimensional vector/array of numbers and/or 
            :class:`gvar.GVar`\s, or an array of such vectors.
            Requires ``b.shape[0] == a.shape[1]``.

    Returns:
        The solution ``x`` of ``a.dot(x) = b``, which is equivalent
        to ``inv(a).dot(b)``.

    Raises:
        ValueError: If ``a`` is not a two-dimensional, square matrix.
        ValueError: If shape of ``b`` does not match that of ``a``
            (that is ``b.shape[0] != a.shape[1]``).
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    bmean = gvar.mean(b)
    if bmean.shape[0] != a.shape[1]:
        raise ValueError(
            'Mismatch between shapes of a and b: {} {}'.format(a.shape, b.shape)
            )
    xmean = numpy.linalg.solve(amean, bmean)
    ainv = inv(a)
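    # propagate errors to first order: dx = ainv.dot(db - da.dot(xmean))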
    return xmean + ainv.dot(b-bmean - (a-amean).dot(xmean))
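A minimal usage sketch (values invented for illustration; errors in ``x`` are propagated from both ``a`` and ``b``):

    import gvar

    a = gvar.gvar([['2.0(1)', '0.1(1)'], ['0.1(1)', '1.0(1)']])
    b = gvar.gvar(['1.0(2)', '0.5(2)'])
    x = solve(a, b)   # array of gvar.GVars
    print(x)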
Example #2
def main():
    gv.ranseed([2009,2010,2011,2012,2013]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3,6):
        prior = make_prior(nexp,x)
        fit = lsqfit.nonlinear_fit(data=y,fcn=f,prior=prior,p0=p0) # ,svdcut=SVDCUT)
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        fit.check_roundoff()
        if nexp == 4:
            sys.stdout = tee.tee(sys.stdout,open("eg2.out","w"))
        print('************************************* nexp =', nexp)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1]/E[0], '  E2/E0 =', E[2]/E[0])
        print('a1/a0 =', a[1]/a[0], '  a2/a0 =', a[2]/a[0])
        sys.stdout = sys_stdout
        print()

    #
    if DO_BOOTSTRAP:
        Nbs = 10                                     # number of bootstrap copies
        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]),np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], '  E2/E0 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], '  a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], '  a1 =', outputs['a1'])

    if DO_PLOT:
        print(fit.format(100))                  # print the fit results
        import pylab as pp
        from gvar import mean,sdev
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=gv.mean(x),y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #3
 def test_svd(self):
     " EigenBasis.svd "
     tdata = [1, 2, 3, 4]
     G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
     basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
     Gsvd = basis.svd(G, svdcut=0.9)
     self.assertEqual(basis.svdn, 15)
     self.assertEqual(str(basis.svdcorrection), "0.000(30)")
     for k in G:
         np.testing.assert_allclose(gv.mean(G[k]), gv.mean(Gsvd[k]))
         self.assertTrue(np.all(gv.sdev(Gsvd[k]) > gv.sdev(G[k])))
Example #4
def make_plot(x, y, fit, ylabel='y(x)', xmax=1.0):
	if not MAKE_PLOTS:
		return
	plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='bo')
	x = np.arange(0., xmax, 0.01)
	yfit = f(x, fit.p)
	plt.plot(x, gv.mean(yfit), 'k--')
	yplus = gv.mean(yfit) + gv.sdev(yfit)
	yminus = gv.mean(yfit) - gv.sdev(yfit)
	plt.fill_between(x, yminus, yplus, color='0.8')
	plt.xlim(0,1)
	plt.ylim(0.3,1.9)
	plt.xlabel('x')
	plt.ylabel(ylabel)
	plt.show()
Example #5
 def test_simulation(self):
     """ CorrFitter.simulated_data_iter """
     models = [ self.mkcorr(a="a", b="a", dE="dE", tp=None) ]
     fitter = self.dofit(models)
     data = self.data
     diter = gv.BufferDict()
     k = list(data.keys())[0]
     # make n config dataset corresponding to data
     n = 100
     diter = gv.raniter(
         g = gv.gvar(gv.mean(self.data[k]), gv.evalcov(self.data[k]) * n),
         n = n
         )
     dataset = gv.dataset.Dataset()
     for d in diter:
         dataset.append(k, d)
     pexact = fitter.fit.pmean
     covexact = gv.evalcov(gv.dataset.avg_data(dataset)[k])
     for sdata in fitter.simulated_data_iter(n=2, dataset=dataset):
         sfit = fitter.lsqfit(
             data=sdata, prior=self.prior, p0=pexact, print_fit=False
             )
         diff = dict()
         for i in ['a', 'logdE']:
             diff[i] = sfit.p[i][0] - pexact[i][0]
         c2 = gv.chi2(diff)
         self.assertLess(c2/c2.dof, 15.)
         self.assert_arraysclose(gv.evalcov(sdata[k]), covexact)
Example #6
def eigvalsh(a, eigvec=False):
    """ Eigenvalues of Hermitian matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers 
            and/or :class:`gvar.GVar`\s.
        eigvec (bool): If ``True``, method returns a tuple of arrays
            ``(val, vec)`` where the ``val[i]`` are the
            eigenvalues. Arrays ``vec[:, i]`` are the corresponding
            eigenvectors of ``a`` when one ignores uncertainties (that is, 
            they are eigenvectors of ``gvar.mean(a)``). Only ``val`` is 
            returned if ``eigvec=False`` (default).

    Returns:
        Array ``val`` of eigenvalues of matrix ``a`` if parameter
        ``eigvec==False`` (default); otherwise a tuple of arrays
        ``(val, vec)`` where the ``val[i]`` are the eigenvalues and
        the ``vec[:, i]`` are the corresponding eigenvectors of ``a``
        when one ignores uncertainties (that is, they are eigenvectors
        of ``gvar.mean(a)``).

    Raises:
        ValueError: If ``a`` is not a two-dimensional, square matrix.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean 
    val, vec = numpy.linalg.eigh(amean)
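    # first-order perturbation theory: dval[i] = vec[:, i].dot(da.dot(vec[:, i]))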
    val = val + [vec[:, i].dot(da.dot(vec[:, i])) for i in range(vec.shape[1])]
    return (val, vec) if eigvec else val
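A short usage sketch (values invented for illustration; per the docstring, the eigenvectors carry no uncertainties):

    import gvar

    a = gvar.gvar([['2.0(1)', '0.5(1)'], ['0.5(1)', '1.0(1)']])
    val = eigvalsh(a)                    # eigenvalues with propagated errors
    val, vec = eigvalsh(a, eigvec=True)  # vec columns are eigenvectors of gvar.mean(a)
    print(val)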
Example #7
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = gv.gvar([ 
        '0.10(1)', '0.20(1)', '0.30(1)', '0.40(1)',  '0.50(1)', 
        '0.60(1)',  '0.70(1)',  '0.80(1)',  '0.90(1)', '1.00(1)'
        ])
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)', 
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)', 
        '0.185(79)', '0.832(79)'
        ])

    for t_n, theta_n in zip(t, theta):
        print("{}  {:>10}".format(t_n.fmt(2), theta_n.fmt(3)))
    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = gv.gvar('40(20)')
    prior['theta(0)'] = gv.gvar('1.571(50)')
    prior['t'] = t

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=None):
        if t is None:
            t = p['t']
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    sys.stdout = tee.tee(STDOUT, open('case-pendulum.out', 'w'))
    print(fit.format(maxline=True))
    sys.stdout = STDOUT
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2*np.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (np.pi / 2.))
    
    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4,3))
        # start plot with data
        plt.errorbar(
            x=gv.mean(t), xerr=gv.sdev(t), y=gv.mean(theta), yerr=gv.sdev(theta),
            fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
Example #8
 def test_eigvalsh(self):
     m = gv.gvar([['2.1(1)', '0(0)'], ['0(0)', '0.5(3)']])
     th = 0.92
     cth = numpy.cos(th)
     sth = numpy.sin(th)
     u = numpy.array([[cth, sth], [-sth, cth]])
     mrot = u.T.dot(m.dot(u))
     val =  linalg.eigvalsh(mrot)
     self.assertTrue(gv.equivalent(val[0], m[1, 1]))
     self.assertTrue(gv.equivalent(val[1], m[0, 0]))
     val, vec = linalg.eigvalsh(mrot, eigvec=True)
     np.testing.assert_allclose(
         gv.mean(mrot).dot(vec[:, 0]), val[0].mean * vec[:, 0]
         )
     np.testing.assert_allclose(
         gv.mean(mrot).dot(vec[:, 1]), val[1].mean * vec[:, 1]
         )
Example #9
 def test_apply(self):
     " EigenBasis EigenBasis.apply EigenBasis.unapply "
     for tdata in [[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0, 1.0, 2.0]]:
         tdata = np.array(tdata)
         G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
         basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
         np.testing.assert_allclose(basis.E, self.E)
         newG = basis.apply(G, "{s1}{s2}")
         newG_mean = gv.mean(newG)
         np.testing.assert_allclose(newG_mean["00"], gv.exp(-self.E[0] * tdata))
         np.testing.assert_allclose(newG_mean["11"], gv.exp(-self.E[1] * tdata))
         np.testing.assert_allclose(newG_mean["01"], 0, atol=1e-10)
         np.testing.assert_allclose(newG_mean["10"], 0, atol=1e-10)
         oldG = basis.unapply(newG, "{s1}{s2}")
         for k in ["aa", "ab", "ba", "bb"]:
             np.testing.assert_allclose(gv.mean(oldG[k] - G[k]), 0, atol=1e-10)
             np.testing.assert_allclose(gv.sdev(oldG[k] - G[k]), 0, atol=1e-10)
Example #10
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,8):
        print('************************************* nexp =',nexp)
        prior = make_prior(nexp)
        # eps = gv.gvar(1,1e-300)   # use svdcut to make it independent
        # prior['a'] *= eps
        # y *= eps
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior, 
                                   p0=p0,svdcut=SVDCUT)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print()
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    
    if DO_BOOTSTRAP:
        Nbs = 10                                     # number of bootstrap copies
            
        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.dataset.avg_data(outputs[k],bstrap=True).fmt(3)
                                 # gv.gvar(np.mean(outputs[k]),
                                 # np.std(outputs[k])).fmt(3)
        print('Bootstrap results:')
        print('E1/E0 =',outputs['E1/E0'],'  E2/E0 =',outputs['E2/E0'])
        print('a1/a0 =',outputs['a1/a0'],'  a2/a0 =',outputs['a2/a0'])
        print('E1 =',outputs['E1'],'  a1 =',outputs['a1'])
        
    if DO_PLOT:
        print(fit.format(100))                   # print the fit results
        import pylab as plt   
        ratio = y/f(x,fit.pmean)
        plt.xlim(0,21)
        plt.xlabel('x')
        plt.ylabel('y/f(x,p)')
        plt.errorbar(x=x,y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        plt.plot([0.0,21.0],[1.0,1.0])
        plt.show()
Example #11
def make_adv_init_from_fit_file_3pt(models, filename, nst=-1, ost=-1, n3st=-1, o3st=-1,
                                    fresh_overlap=False, fresh_amplitude=True):
    init = {}
    infit = __import__(filename)
    nn = nst
    no = ost
    if nn < 0:
        nn = df.num_nst
    if no < 0:
        no = df.num_ost
    if n3st < 0:
        nn3 = df.num_nst_3pt
    else:
        nn3 = n3st
    if o3st < 0:
        no3 = df.num_ost_3pt
    else:
        no3 = o3st

    ## -- save a list of keys for quick reference
    klst = tuple()  ## all keys
    glst = tuple()  ## diagonal terms only
    olst = tuple()  ## overlaps only
    for model in models:
        try:
            for item in [model.dEa, model.dEb, model.a, model.b]:
                klst += tuple(item)
            klst += tuple(model.V[0]) + tuple(model.V[1])  ## -- need to split matrix up
            klst += tuple(model.g)  ## -- probably not necessary
        except AttributeError:  ## -- 2 point function
            for item in [model.dE, model.a, model.b]:
                klst += tuple(item)
        olst += tuple(model.a) + tuple(model.b)
        try:
            glst += tuple(model.g)
        except AttributeError:
            pass
    klst = tuple(set(klst))  ## -- delete duplicates
    olst = tuple(set(olst))
    glst = tuple(set(glst))

    for key in infit.init_val_import:
        skey = key.split('_')
        if skey[0] in klst:
            ## -- add to initial value dictionary
            init[key] = gv.mean(infit.init_val_import[key])
            ## -- if requested, wipe values
            sk = skey[0][-2:]
            if fresh_amplitude and sk in ('nn', 'no', 'on', 'oo'):
                init[key] = np.ones(np.shape(init[key]))
            if fresh_amplitude and skey[0] in glst:
                init[key] = np.ones(np.shape(init[key]))
            if fresh_overlap and (skey[0] in olst):
                init[key] = np.ones(np.shape(init[key]))
    ## -- finish up
    return mpa.truncate_prior_states(init, nn, no, nn3, no3)
Example #12
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,8):
        print('************************************* nexp =',nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0,svdcut=SVDCUT)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print()
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    
    if DO_ERRORBUDGET:
        outputs = OrderedDict([
            ('E1/E0', E[1]/E[0]), ('E2/E0', E[2]/E[0]),         
            ('a1/a0', a[1]/a[0]), ('a2/a0', a[2]/a[0])
            ])
        inputs = OrderedDict([
            ('E', fit.prior['E']), ('a', fit.prior['a']),
            ('y', y), ('svd', fit.svdcorrection)
            ])
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs,inputs))
        
    if DO_EMPBAYES:
        def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5,0.5*z[0]) for i in range(nexp)]
            return dict(prior=prior,data=data,fcn=f,p0=p0)
        ##
        z0 = [0.0]
        fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3)
        print(fit)                  # print the optimized fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print("prior['a'] =",fit.prior['a'][0].fmt())
        print()
    
    if DO_PLOT:
        import pylab as pp   
        from gvar import mean,sdev     
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #13
def dump_precompute(g, outputfile):
    if isinstance(outputfile, str):
        outputfile = open(outputfile, 'wb')
    mn = gv.mean(g)
    evb = gv.evalcov(g.buf)
    covm = {}
    for keyi in g:
        for keyj in g:
            covm[keyi, keyj] = evb[g.slice(keyi), g.slice(keyj)]
    pickle.dump((mn, covm), outputfile)
    outputfile.close()
Example #14
def show_plot(t_array, th_array):
    """ Display theta vs t plot. """
    th_mean = gv.mean(th_array) 
    th_sdev = gv.sdev(th_array)
    thp = th_mean + th_sdev
    thm = th_mean - th_sdev
    plt.fill_between(t_array, thp, thm, color='0.8')
    plt.plot(t_array, th_mean, linewidth=0.5)
    plt.xlabel('$t$')
    plt.ylabel(r'$\theta(t)$')
    plt.savefig('pendulum.pdf', bbox_inches='tight')
    plt.show()
Example #15
def tabulate_avg(avgout,format=(6,3)):
    """ Tabulates averages and standard deviations.
        
    ``tabulate_avg(...)`` creates a nicely formatted table displaying the
    output from functions like ``dataset.Dataset.gdev``; ``avgout`` is
    that output. Parameter ``format`` specifies the output format:
    ``format=(N,D)`` implies that format ``'%N.Df(%Dd)'`` is used to print
    ``avg, int(10**D * std_dev)``. The table is returned as a single
    string, for printing.
    """
    table = []
    output = sorted(avgout.items())
    for tag,avsd in output:
        try:
            av = avsd.mean
            sd = avsd.sdev
        except AttributeError:
            av = gvar.mean(avsd)
            sd = gvar.sdev(avsd)
        lines = ''
        line = '%15s' % str(tag)
        try:
            sdfac = 10**format[1]
            fmt = (' %'+str(format[0])+'.'+str(format[1])+
                  'f(%'+str(format[1])+'d)')
            def avgfmt(av,sd,fmt=fmt,sdfac=sdfac):
                try:
                    return fmt % (av,int(sdfac*sd+0.5))
                except:
                    return (' %g (%.4g)' % (av,sd))
            ##
        except:
            def avgfmt(av,sd):
                return (' %g (%.4g)' % (av,sd))
            ##
        na = len(av)
        if len(sd)<na:
            na = len(sd)
        if na>=1:
            for i in range(na):
                if len(sd.shape)==2:
                    sdi = math.sqrt(sd[i][i])
                else:
                    sdi = sd[i]
                nextfield = avgfmt(av[i],sdi)
                if (len(nextfield)+len(line))>78:
                    lines = lines + line + '\n'
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
            table.append(lines + line +'\n')
    return '\n'.join(table)
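A usage sketch (made-up numbers; each value in ``avgout`` is a 1-D array of ``gvar.GVar``\s, as produced by averaging routines):

    import gvar

    avgout = dict(
        x=gvar.gvar([1.234, 5.678], [0.012, 0.034]),
        y=gvar.gvar([0.25, 0.75], [0.05, 0.05]),
        )
    print(tabulate_avg(avgout))   # default format=(6,3) prints, e.g., ' 1.234( 12)'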
Example #16
    def test_expval(self):
        " integrator(f ...) "
        xarray = gv.gvar([5., 3.], [[4., 0.9], [0.9, 1.]])
        xdict = gv.BufferDict([(0, 1), (1, 1)])
        xdict = gv.BufferDict(xdict, buf=xarray)
        xscalar = xarray[0]
        def fscalar(x):
            if hasattr(x, 'keys'):
                x = x.buf
            return x.flat[0]
        def farray(x):
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.PDFStatistics.moments(x.flat[0])
        def fdict(x):
            if hasattr(x, 'keys'):
                x = x.buf
            return gv.BufferDict([
                 (0, x.flat[0]), (1, x.flat[0] ** 2),
                (2, x.flat[0] ** 3), (3, x.flat[0] ** 4)
                ])
        for x in [xscalar, xarray, xdict]:
            integ = PDFIntegrator(x)
            integ(neval=1000, nitn=5)
            for f in [fscalar, farray, fdict]:
                r = integ(f, neval=1000, nitn=5, adapt=False)
                if f is fscalar:
                    self.assertTrue(abs(r.mean - 5) < 5. * r.sdev)
                else:
                    if hasattr(r, 'keys'):
                        r = r.buf
                    s = gv.PDFStatistics(r)
                    self.assertTrue(abs(s.mean.mean - 5.) < 10. * s.mean.sdev)
                    self.assertTrue(abs(s.sdev.mean - 2.) < 10. * s.sdev.sdev)
                    self.assertTrue(abs(s.skew.mean) < 10. * s.skew.sdev)
                    self.assertTrue(abs(s.ex_kurt.mean) < 10. * s.ex_kurt.sdev)

        # covariance test
        def fcov(x):
            return dict(x=x, xx=np.outer(x, x))
        integ = PDFIntegrator(xarray)
        r = integ(fcov, neval=1000, nitn=5)
        rmean = r['x']
        rcov = r['xx'] - np.outer(r['x'], r['x'])
        xmean = gv.mean(xarray)
        xcov = gv.evalcov(xarray)
        for i in [0, 1]:
            self.assertTrue(abs(rmean[i].mean - xmean[i]) < 5. * rmean[i].sdev)
            for j in [0, 1]:
                self.assertTrue(abs(rcov[i,j].mean - xcov[i,j]) < 5. * rcov[i,j].sdev)
Example #17
def do_plot_corr_effective_mass_check(idx, fig=fig):
    fig.clear()
    ax = fig.add_subplot(111)
    key = models[idx[0]].datatag
    ax.set_ylim(utp.get_option("y_limit", [0.0, 1.2], **kwargs[key]))
    ## -- plot fit
    ax.plot(_emTPosFit[idx[0]], _emFitCentral[idx[0]],
            color=utp.get_option("color3", 'b', **kwargs[key]))
    ax.plot(_emTPosFit[idx[0]], _emFitError[idx[0]][0],
            color=utp.get_option("color3", 'b', **kwargs[key]),
            ls=utp.get_option("linestyle2", '--', **kwargs[key]))
    ax.plot(_emTPosFit[idx[0]], _emFitError[idx[0]][1],
            color=utp.get_option("color3", 'b', **kwargs[key]),
            ls=utp.get_option("linestyle2", '--', **kwargs[key]))
    ## -- plot reference
    for val in df.ec_reference_lines:
        vt = [val for t in _emTPosFit[idx[0]]]
        ax.plot(_emTPosFit[idx[0]], vt,
                color=utp.get_option("color2", 'g', **kwargs[key]))
    ## -- plot correlator data
    ax.errorbar(_emTPosRatio[idx[0]], _emLogRatioCentral[idx[0]],
                yerr=_emLogRatioError[idx[0]],
                mfc=utp.get_option("markerfacecolor1", 'None', **kwargs[key]),
                mec=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                color=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                ls=utp.get_option("linestyle1", 'None', **kwargs[key]),
                marker=utp.get_option("marker1", 'o', **kwargs[key]),
                ms=utp.get_option("markersize", 6, **kwargs[key]))
    ax.scatter(_emTPosFit[idx[0]], gv.mean(_emLogRatioFit[idx[0]]),
               color=utp.get_option("color1", 'r', **kwargs[key]),
               marker=utp.get_option("marker", 'o', **kwargs[key]),
               s=utp.get_option("markersize", 36, **kwargs[key]))
    fig.suptitle(utp.get_option("plottitle", str(idx[0]) + " default title " + str(key),
                                **kwargs[key]),
                 fontsize=utp.get_option("titlesize", 20, **kwargs[key]))
    ## -- modify some options
    ax.set_xlabel(r'$t$ slice')
    ax.set_ylabel(utp.get_option("yaxistitle",
                  r"$-\frac{1}{" + str(_emSep) + r"}\,log\frac{C(t+" + str(_emSep) + r")}{C(t)}$",
                  **kwargs[key]))
    for item in [ax.xaxis.label, ax.yaxis.label]:
        # must be after setting label content (LaTeX ruins it)
        item.set_fontsize(fontsize=utp.get_option("fontsize", 20, **kwargs[key]))
    rect = fig.patch
    rect.set_facecolor('white')
    if utp.get_option("to_file", False, **kwargs[key]):
        save_dir = utp.get_option("ec_save_dir", "./plotdump", **kwargs[key])
        save_name = utp.get_option("ec_save_name", "ecplot-" + key + ".pdf", **kwargs[key])
        plt.savefig(save_dir + '/' + save_name)
    if utp.get_option("to_terminal", True, **kwargs[key]):
        plt.draw()
Example #18
 def test_inv(self):
     m = self.make_random([[1., 0.1], [0.1, 2.]])
     one = gv.gvar([['1(0)', '0(0)'], ['0(0)', '1(0)']])
     invm = linalg.inv(m)
     self.assertTrue(gv.equivalent(linalg.inv(invm), m))
     for mm in [invm.dot(m), m.dot(invm)]:
         np.testing.assert_allclose(
             gv.mean(mm), [[1, 0], [0, 1]], rtol=1e-10, atol=1e-10
             )
         np.testing.assert_allclose(
             gv.sdev(mm), [[0, 0], [0, 0]], rtol=1e-10, atol=1e-10
             )
     p = linalg.det(m) * linalg.det(invm)
     self.assertAlmostEqual(p.mean, 1.)
     self.assertGreater(1e-10, p.sdev)
Example #19
 def test_apply(self):
     " EigenBasis EigenBasis.apply EigenBasis.unapply "
     for tdata in [
         [1., 2., 3., 4.],
         [2., 4., 6., 8.],
         [0, 1., 2.],
         ]:
         tdata = np.array(tdata)
         G = self.make_G(tdata, keyfmt='{s1}{s2}', srcs='ab')
         basis = EigenBasis(
             data=G, keyfmt='{s1}{s2}', srcs='ab',
             t=2, tdata=tdata,
             )
         np.testing.assert_allclose(basis.E, self.E)
         newG = basis.apply(G, '{s1}{s2}')
         newG_mean = gv.mean(newG)
         np.testing.assert_allclose(newG_mean['00'], gv.exp(-self.E[0] * tdata))
         np.testing.assert_allclose(newG_mean['11'], gv.exp(-self.E[1] * tdata))
         np.testing.assert_allclose(newG_mean['01'], 0, atol=1e-10)
         np.testing.assert_allclose(newG_mean['10'], 0, atol=1e-10)
         oldG = basis.unapply(newG, '{s1}{s2}')
         for k in ['aa', 'ab', 'ba', 'bb']:
             np.testing.assert_allclose(gv.mean(oldG[k] - G[k]), 0, atol=1e-10)
             np.testing.assert_allclose(gv.sdev(oldG[k] - G[k]), 0, atol=1e-10)
Example #20
 def test_histogram(self):
     x = gv.gvar([5., 3.], [[4., 0.2], [0.2, 1.]])
     xsum = x[0] + x[1]
     integ = PDFIntegrator(x)
     hist = gv.PDFHistogram(xsum, nbin=40, binwidth=0.2)
     integ(neval=1000, nitn=5)
     def fhist(x):
         return hist.count(x[0] + x[1])
     r = integ(fhist, neval=1000, nitn=5, adapt=False)
     bins, prob, stat, norm = hist.analyze(r)
     self.assertTrue(abs(gv.mean(np.sum(prob)) - 1.) < 5. * gv.sdev(np.sum(prob)))
     self.assertTrue(abs(stat.mean.mean - xsum.mean) < 5. * stat.mean.sdev)
     self.assertTrue(abs(stat.sdev.mean - xsum.sdev) < 5. * stat.sdev.sdev)
     self.assertTrue(abs(stat.skew.mean) < 5. * stat.skew.sdev)
     self.assertTrue(abs(stat.ex_kurt.mean) < 5. * stat.ex_kurt.sdev)
Example #21
def inv(a):
    """ Inverse of matrix ``a``. 

    Args:
        a: Two-dimensional, square matrix/array of numbers 
            and/or :class:`gvar.GVar`\s.

    Returns:
        The inverse of matrix ``a``.

    Raises:
        ValueError: If ``a`` is not a two-dimensional, square matrix.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = numpy.linalg.inv(amean)
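    # first-order expansion: inv(A + dA) = inv(A) - inv(A).dot(dA).dot(inv(A)) + O(dA**2)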
    return ainv - ainv.dot(da.dot(ainv))
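A short usage sketch (values invented for illustration); the first-order errors in ``inv(a).dot(a)`` cancel, which is what ``test_inv`` in Example #18 checks:

    import gvar

    a = gvar.gvar([['2.0(1)', '0(0)'], ['0(0)', '4.0(2)']])
    ainv = inv(a)
    print(gvar.mean(ainv.dot(a)))   # identity matrix
    print(gvar.sdev(ainv.dot(a)))   # zeros: first-order errors cancel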
Example #22
def det(a):
    """ Determinant of matrix ``a``. 

    Args:
        a: Two-dimensional, square matrix/array of numbers 
            and/or :class:`gvar.GVar`\s.

    Returns:
        Determinant of the matrix.

    Raises:
        ValueError: If ``a`` is not a two-dimensional, square matrix.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = inv(amean)
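    # Jacobi's formula to first order: det(A + dA) = det(A) * (1 + trace(inv(A).dot(dA)))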
    return numpy.linalg.det(amean) * (1 + numpy.matrix.trace(da.dot(ainv)))
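A quick check on a diagonal example (values invented for illustration); to first order the error works out to sqrt((3*0.1)**2 + (2*0.2)**2) = 0.5:

    import gvar

    a = gvar.gvar([['2.0(1)', '0(0)'], ['0(0)', '3.0(2)']])
    print(det(a))   # 6.00(50)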
Example #23
def correlator_pion_ratio_fn(cora, corb, t, T, expm, fac):
    """
    -- average corb*expm^t for a range of t,
       then divide all of cora by fac*sqrt(avg)
    ==
    -- prototype function
       must define a new function with t,T,expm,fac fixed to use with fn_apply_tags
    """
    def fexp(cor, tp):
        if tp < T/2:
            return np.abs(cor[tp]) * np.power(expm, float(tp))
        else:
            return np.abs(cor[tp]) * np.power(expm, float(T - tp))
    cor2 = average_tag_fn(corb)
    cor1 = apply_t_fn(cor2, fexp, t)
    cor0 = plateau_tag_fn(cor1)
    cor0 = gv.mean(np.sqrt(cor0) * fac)
    cnew = list()
    for c in cora:
        cnew.append(c * cor0)
    return cnew
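Following the docstring's prototype note, a sketch of fixing ``t``, ``T``, ``expm``, and ``fac`` (parameter values are hypothetical; ``fn_apply_tags`` is assumed to expect a function of the two remaining correlator arguments):

    import functools

    ratio_fn = functools.partial(
        correlator_pion_ratio_fn, t=range(8, 16), T=64, expm=0.9, fac=2.0,
        )
    # ratio_fn(cora, corb) now takes only the two correlator arguments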
Example #24
def slogdet(a):
    """ Sign and logarithm of determinant of matrix ``a``. 

    Args:
        a: Two-dimensional, square matrix/array of numbers 
            and/or :class:`gvar.GVar`\s.

    Returns:
        Tuple ``(s, logdet)`` where the determinant of matrix ``a`` is
            ``s * exp(logdet)``.

    Raises:
        ValueError: If matrix is not square and two-dimensional.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = inv(amean)
    s, ldet = numpy.linalg.slogdet(amean)
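    # first-order correction: d(log(det(A))) = trace(inv(A).dot(dA))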
    ldet += numpy.matrix.trace(da.dot(ainv))
    return s, ldet
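A short usage sketch (same illustrative matrix as the det example above):

    import gvar

    a = gvar.gvar([['2.0(1)', '0(0)'], ['0(0)', '3.0(2)']])
    s, logdet = slogdet(a)
    print(s, gvar.exp(logdet))   # 1.0 and 6.00(50), matching det(a)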
Example #25
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = [ 0.1, 0.2, 0.3, 0.4,  0.5, 0.6,  0.7,  0.8,  0.9, 1.]
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])

    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = (2 * math.pi) ** 2 * gv.gvar(1, 0.1)
    prior['theta(0)'] = gv.gvar(math.pi / 2., 0.05)

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=t):
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    print(fit.format(maxline=True))
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2*math.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (math.pi / 2.))

    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4,3))
        # start plot with data
        plt.errorbar(
            x=t, y=gv.mean(theta), yerr=gv.sdev(theta),
            fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
Example #26
def fit_velocity_error_model(x,
                             y,
                             vel,
                             vel_err,
                             nm_laser,
                             calibration_laser_map,
                             pixel_size,
                             binning=6):
    """Fit a model of the spectral calibration error based on a simplified
    optical model of the interferometer.

    :param x: Positions of the velocities along x axis

    :param y: Positions of the velocities along y axis

    :param vel: Measured velocity errors at (x, y)

    :param vel_err: Uncertainty on the measured velocity errors.

    :param nm_laser: Calibration laser wavelength in nm

    :param calibration_laser_map: Calibration laser map

    :param pixel_size: Pixel size in um

    :param binning: (Optional) Binning during computation (the process is
      faster, with marginal precision loss) (default 6)
    """
    def model(p, wf, pixel_size, orig_fit_map, x, y):
        # 0: mirror_distance
        # 1: theta_cx
        # 2: theta_cy
        # 3: phi_x
        # 4: phi_y
        # 5: phi_r
        # 6: calib_laser_nm
        new_map = (orb.utils.image.simulate_calibration_laser_map(
            wf.shape[0], wf.shape[1], pixel_size, p[0], p[1], p[2], p[3], p[4],
            p[5], p[6]) + wf)
        dl_map = new_map - orig_fit_map
        dl_mod = list()
        for i in range(len(x)):
            dl_mod.append(dl_map[int(x[i]), int(y[i])])
        return np.array(dl_mod)

    def get_p(p_var, p_fix, p_ind):
        """p_ind = 0: variable parameter, index=1: fixed parameter
        """
        p_all = np.empty_like(p_ind, dtype=float)
        p_all[np.nonzero(p_ind == 0.)] = p_var
        p_all[np.nonzero(p_ind > 0.)] = p_fix
        return p_all

    def diff(p_var, p_fix, p_ind, wf, pixel_size, orig_fit_map, x, y, dl):
        p = get_p(p_var, p_fix, p_ind)
        dl_mod = model(p, wf, pixel_size, orig_fit_map, x, y)

        res = ((dl_mod - gvar.mean(dl)) / gvar.sdev(dl)).astype(float)
        return res[~np.isnan(res)]

    def print_params(params):
        logging.info(
            '    > New calibration laser map fit parameters:\n' +
            '    distance to mirror: {} cm\n'.format(params[0] * 1e-4) +
            '    X angle from the optical axis to the center: {} degrees\n'.
            format(np.fmod(float(params[1]), 360)) +
            '    Y angle from the optical axis to the center: {} degrees\n'.
            format(np.fmod(float(params[2]), 360)) +
            '    Tip-tilt angle of the detector along X: {} degrees\n'.format(
                np.fmod(float(params[3]), 360)) +
            '    Tip-tilt angle of the detector along Y: {} degrees\n'.format(
                np.fmod(float(params[4]), 360)) +
            '    Rotation angle of the detector: {} degrees\n'.format(
                np.fmod(float(params[5]), 360)) +
            '    Calibration laser wavelength: {} nm\n'.format(params[6]))

    if (x.shape != y.shape or x.shape != vel.shape
            or vel.shape != vel_err.shape):
        raise TypeError('x, y, vel and vel_err must have the same shape')

    if x.ndim != 1: raise TypeError('x must have only one dimension')

    # create weights map
    w = 1. / (vel_err)
    #w /= np.nanmax(w)
    #w[np.isnan(w)] = 1e-35
    x = x[~np.isnan(w)]
    y = y[~np.isnan(w)]
    vel = vel[~np.isnan(w)]
    vel_err = vel_err[~np.isnan(w)]
    w = w[~np.isnan(w)]

    vel[vel < np.nanpercentile(vel, 5)] = np.nan
    vel[vel > np.nanpercentile(vel, 95)] = np.nan
    x = x[~np.isnan(vel)]
    y = y[~np.isnan(vel)]
    w = w[~np.isnan(vel)]
    vel_err = vel_err[~np.isnan(vel)]
    vel = vel[~np.isnan(vel)]

    # transform velocity error into calibration error (v = dl/l with
    # l = 543.5 nm); the measured velocity error enters with its sign
    # inverted
    vel = -gvar.gvar(vel, vel_err)
    shift_map = orb.utils.spectrum.line_shift(vel, nm_laser)

    # compute a first estimation of the real calibration laser
    # wavelength
    new_nm_laser = nm_laser + np.nanmedian(gvar.mean(shift_map))
    logging.info('First laser wavelength calibration estimation: {} nm'.format(
        new_nm_laser))

    new_shift_map = shift_map - (new_nm_laser - nm_laser)

    # convert shift map to velocity map
    new_vel = orb.utils.spectrum.compute_radial_velocity(
        new_shift_map + new_nm_laser, new_nm_laser)

    # fit calibration map to get model + wavefront
    (orig_params, orig_fit_map,
     orig_model) = orb.utils.image.fit_calibration_laser_map(
         calibration_laser_map,
         new_nm_laser,
         pixel_size=pixel_size,
         return_model_fit=True)

    ######################
    ## orb.utils.io.write_fits('orig_fit_map.fits', orig_fit_map,
    ## overwrite=True)
    ## orb.utils.io.write_fits('orig_model.fits', orig_model, overwrite=True)
    ## orb.utils.io.write_fits('orig_params.fits', orig_params,
    ## overwrite=True)
    ## orig_fit_map = orb.utils.io.read_fits('orig_fit_map.fits')
    ## orig_model = orb.utils.io.read_fits('orig_model.fits')
    ## orig_params = orb.utils.io.read_fits('orig_params.fits')
    #################

    orig_fit_map_bin = orb.utils.image.nanbin_image(orig_fit_map, binning)
    orig_model_bin = orb.utils.image.nanbin_image(orig_model, binning)
    wf = orig_fit_map - orig_model
    wf_bin = orb.utils.image.nanbin_image(wf, binning)
    pixel_size_bin = pixel_size * float(binning)
    x_bin = x / float(binning)
    y_bin = y / float(binning)

    # calib laser map fit
    #p_var = orig_params[:-1]
    #p_fix = [new_nm_laser]
    #p_ind = np.array([0,0,0,0,0,0,1])
    p_var = orig_params[:-1]

    p_fix = []
    p_ind = np.array([0, 0, 0, 0, 0, 0, 0])
    fit = scipy.optimize.leastsq(diff,
                                 p_var,
                                 args=(p_fix, p_ind, wf_bin, pixel_size_bin,
                                       orig_fit_map_bin, x_bin, y_bin,
                                       new_shift_map),
                                 full_output=True)
    p = fit[0]
    print_params(p)

    # get fit stats
    model_shift_map = model(p, wf, pixel_size, orig_fit_map, x, y)
    model_vel = orb.utils.spectrum.compute_radial_velocity(new_nm_laser +
                                                           model_shift_map,
                                                           new_nm_laser,
                                                           wavenumber=False)

    logging.info('fit residual std (in km/s): {}'.format(
        np.nanstd(model_vel - gvar.mean(vel))))

    logging.info('median error on the data (in km/s): {}'.format(
        np.nanmedian(gvar.sdev(vel))))

    # compute new calibration laser map
    model_calib_map = (orb.utils.image.simulate_calibration_laser_map(
        wf.shape[0], wf.shape[1], pixel_size, p[0], p[1], p[2], p[3], p[4],
        p[5], p[6]) + wf)

    # compute new velocity correction map
    final_shift_map = model_calib_map - orig_fit_map
    final_vel_map = orb.utils.spectrum.compute_radial_velocity(
        (new_nm_laser + final_shift_map), nm_laser, wavenumber=False)

    return model_calib_map, wf, final_vel_map, new_nm_laser
Example #27
def main():
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg3a.out","w"))
    x, y = make_data()
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(prior=prior, data=(x,y), fcn=fcn)
    print(fit)
    print('p1/p0 =', fit.p[1] / fit.p[0], '    p3/p2 =', fit.p[3] / fit.p[2])
    print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1,0])

    if DO_PLOT:
        plt.semilogx()
        plt.errorbar(
            x=gv.mean(x), xerr=gv.sdev(x), y=gv.mean(y), yerr=gv.sdev(y),
            fmt='ob'
            )
        # plot fit line
        xx = np.linspace(0.99 * gv.mean(min(x)), 1.01 * gv.mean(max(x)), 100)
        yy = fcn(xx, fit.pmean)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.plot(xx, yy, ':r')
        plt.savefig('eg3.png', bbox_inches='tight')
        plt.show()

    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        gv.ranseed(123)
        sys.stdout = tee.tee(sys_stdout, open('eg3c.out', 'w'))
        print(fit)
        print('p1/p0 =', fit.p[1] / fit.p[0], '    p3/p2 =', fit.p[3] / fit.p[2])
        print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1,0])
        Nbs = 40
        outputs = {'p':[], 'p1/p0':[], 'p3/p2':[]}
        for bsfit in fit.bootstrap_iter(n=Nbs):
            p = bsfit.pmean
            outputs['p'].append(p)
            outputs['p1/p0'].append(p[1] / p[0])
            outputs['p3/p2'].append(p[3] / p[2])
        print('\nBootstrap Averages:')
        outputs = gv.dataset.avg_data(outputs, bstrap=True)
        print(gv.tabulate(outputs))
        print('corr(p0,p1) =', gv.evalcorr(outputs['p'][:2])[1,0])

        # make histograms of p1/p0 and p3/p2
        sys.stdout = sys_stdout
        print()
        sys.stdout = tee.tee(sys_stdout, open('eg3d.out', 'w'))
        print('Histogram Analysis:')
        count = {'p1/p0':[], 'p3/p2':[]}
        hist = {
            'p1/p0':gv.PDFHistogram(fit.p[1] / fit.p[0]),
            'p3/p2':gv.PDFHistogram(fit.p[3] / fit.p[2]),
            }
        for bsfit in fit.bootstrap_iter(n=1000):
            p = bsfit.pmean
            count['p1/p0'].append(hist['p1/p0'].count(p[1] / p[0]))
            count['p3/p2'].append(hist['p3/p2'].count(p[3] / p[2]))
        count = gv.dataset.avg_data(count)
        plt.rcParams['figure.figsize'] = [6.4, 2.4]
        pltnum = 1
        for k in count:
            print(k + ':')
            print(hist[k].analyze(count[k]).stats)
            plt.subplot(1, 2, pltnum)
            plt.xlabel(k)
            hist[k].make_plot(count[k], plot=plt)
            if pltnum == 2:
                plt.ylabel('')
            pltnum += 1
        plt.rcParams['figure.figsize'] = [6.4, 4.8]
        plt.savefig('eg3d.png', bbox_inches='tight')
        plt.show()

    if DO_BAYESIAN:
        gv.ranseed(123)
        sys.stdout = tee.tee(sys_stdout, open('eg3e.out', 'w'))
        print fit
        expval = lsqfit.BayesIntegrator(fit)

        # adapt integrator to PDF from fit
        neval = 1000
        nitn = 10
        expval(neval=neval, nitn=nitn)

        # <g(p)> gives mean and covariance matrix, and histograms
        hist = [
            gv.PDFHistogram(fit.p[0]), gv.PDFHistogram(fit.p[1]),
            gv.PDFHistogram(fit.p[2]), gv.PDFHistogram(fit.p[3]),
            ]
        def g(p):
            return dict(
                mean=p,
                outer=np.outer(p, p),
                count=[
                    hist[0].count(p[0]), hist[1].count(p[1]),
                    hist[2].count(p[2]), hist[3].count(p[3]),
                    ],
                )

        # evaluate expectation value of g(p)
        results = expval(g, neval=neval, nitn=nitn, adapt=False)

        # analyze results
        print('\nIterations:')
        print(results.summary())
        print('Integration Results:')
        pmean = results['mean']
        pcov =  results['outer'] - np.outer(pmean, pmean)
        print('    mean(p) =', pmean)
        print('    cov(p) =\n', pcov)

        # create GVars from results
        p = gv.gvar(gv.mean(pmean), gv.mean(pcov))
        print('\nBayesian Parameters:')
        print(gv.tabulate(p))

        # show histograms
        print('\nHistogram Statistics:')
        count = results['count']
        for i in range(4):
            print('p[{}] -'.format(i))
            print(hist[i].analyze(count[i]).stats)
            plt.subplot(2, 2, i + 1)
            plt.xlabel('p[{}]'.format(i))
            hist[i].make_plot(count[i], plot=plt)
            if i % 2 != 0:
                plt.ylabel('')
        plt.savefig('eg3e.png', bbox_inches='tight')
        plt.show()

    if DO_SIMULATION:
        gv.ranseed(1234)
        sys.stdout = tee.tee(sys_stdout, open('eg3f.out', 'w'))
        print(40 * '*' + ' real fit')
        print(fit.format(True))

        Q = []
        p = []
        for sfit in fit.simulated_fit_iter(n=3, add_priornoise=False):
            print(40 * '=' + ' simulation')
            print(sfit.format(True))
            diff = sfit.p - sfit.pexact
            print('\nsfit.p - pexact =', diff)
            print(gv.fmt_chi2(gv.chi2(diff)))
            print()

    # omit constraint
    sys.stdout = tee.tee(sys_stdout, open("eg3b.out", "w"))
    prior = gv.gvar(4 * ['0(1)'])
    prior[1] = gv.gvar('0(20)')
    fit = lsqfit.nonlinear_fit(prior=prior, data=(x,y), fcn=fcn)
    print(fit)
    print('p1/p0 =', fit.p[1] / fit.p[0], '    p3/p2 =', fit.p[3] / fit.p[2])
    print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1,0])
Example #28
def noise_to_signal(ax, y, x=None, label=None, color=None):
    """ Plots the noise-to-signal ratio as a percentage. """
    y = 100 * gv.sdev(y) / gv.mean(y)
    mirror(ax=ax, y=y, x=x, label=label, color=color)
    ax.set_ylabel("n/s (%)")
    return ax
Example #29
def calculate_and_plot_esym():

    den = np.arange(0.001, 0.2, 0.01)

    e_sym_s3 = f_NM(den, NM3_par) - f_SM(den, SM3_par)
    data_esym = te_NM_av - te_SM_av

    fig, axes = plt.subplots(1, 3, figsize=(11, 4), sharey='row')

    axes[0].errorbar(td,
                     gv.mean(data_esym),
                     gv.sdev(data_esym),
                     fmt='ob',
                     label='data (68% CL)')
    axes[0].fill_between(den,
                         gv.mean(e_sym_s3) + gv.sdev(e_sym_s3),
                         gv.mean(e_sym_s3) - gv.sdev(e_sym_s3),
                         label='fit (68% CL)',
                         color='red',
                         alpha=0.8)
    axes[0].fill_between(den,
                         gv.mean(e_sym_s3) + 2 * gv.sdev(e_sym_s3),
                         gv.mean(e_sym_s3) - 2 * gv.sdev(e_sym_s3),
                         label='fit (95% CL)',
                         color='red',
                         alpha=0.2)

    axes[0].set_xlabel('$n$ (fm$^{-3}$)', fontsize='13')
    axes[0].set_ylabel(r'$e_{\mathrm{sym}}$ (MeV)', fontsize='13')
    axes[0].tick_params(right=True)
    axes[0].tick_params(labelsize='13')
    axes[0].set_xticks(np.arange(0, 0.2 + 0.01, 0.05))
    axes[0].tick_params(right=True)
    axes[0].tick_params(top=True)
    axes[0].tick_params(direction='in')

    ############ E_sym^{pot} #####################

    den = np.arange(0.001, 0.2, 0.01)

    e_sym_pot_s3 = f_NM(den, NM3_par) - f_SM(den,
                                             SM3_par) - T_NM(den) + T_SM(den)
    data_esym_pot = te_NM_pot_av - te_SM_pot_av

    axes[1].errorbar(td,
                     gv.mean(data_esym_pot),
                     gv.sdev(data_esym_pot),
                     fmt='ob',
                     label='data (68% CL)')
    axes[1].fill_between(den,
                         gv.mean(e_sym_pot_s3) + gv.sdev(e_sym_pot_s3),
                         gv.mean(e_sym_pot_s3) - gv.sdev(e_sym_pot_s3),
                         label='fit (68% CL)',
                         color='red',
                         alpha=0.8)
    axes[1].fill_between(den,
                         gv.mean(e_sym_pot_s3) + 2 * gv.sdev(e_sym_pot_s3),
                         gv.mean(e_sym_pot_s3) - 2 * gv.sdev(e_sym_pot_s3),
                         label='fit (95% CL)',
                         color='red',
                         alpha=0.2)

    axes[1].set_xlabel('$n$ (fm$^{-3}$)', fontsize='13')
    axes[1].set_ylabel(r'$e_{\mathrm{sym}}^{\mathrm{pot}}$ (MeV)',
                       fontsize='13')
    axes[1].tick_params(right=True)
    axes[1].tick_params(labelsize='13')
    axes[1].set_xticks(np.arange(0, 0.2 + 0.01, 0.05))
    axes[1].legend(loc='upper left', fontsize='13')
    axes[1].tick_params(right=True)
    axes[1].tick_params(top=True)
    axes[1].tick_params(direction='in')

    ############ E_sym^{pot*} #####################

    den = np.arange(0.001, 0.2, 0.01)

    e_sym_pot_eff_s3 = f_NM(den, NM3_par) - f_SM(
        den, SM3_par) - T_NM_eff(den) + T_SM_eff(den)
    data_esym_pot_eff = te_NM_pot_eff_av - te_SM_pot_eff_av

    e_sym_pot_eff_1_s3 = f_NM(den, NM3_par) - f_SM(
        den, SM3_par) - T_NM_eff_1(den) + T_SM_eff_1(den)
    data_esym_pot_eff_1 = te_NM_pot_eff_1_av - te_SM_pot_eff_1_av

    axes[2].errorbar(td,
                     gv.mean(data_esym_pot_eff_1),
                     gv.sdev(data_esym_pot_eff_1),
                     fmt='ob',
                     alpha=0.7)
    axes[2].fill_between(
        den,
        gv.mean(e_sym_pot_eff_1_s3) + gv.sdev(e_sym_pot_eff_1_s3),
        gv.mean(e_sym_pot_eff_1_s3) - gv.sdev(e_sym_pot_eff_1_s3),
        color='red',
        alpha=0.8)
    axes[2].fill_between(
        den,
        gv.mean(e_sym_pot_eff_1_s3) + 2 * gv.sdev(e_sym_pot_eff_1_s3),
        gv.mean(e_sym_pot_eff_1_s3) - 2 * gv.sdev(e_sym_pot_eff_1_s3),
        color='red',
        alpha=0.2)

    axes[2].plot(td,
                 gv.mean(data_esym_pot_eff),
                 'xk',
                 markersize=8,
                 label='data (Quad. fit)')
    axes[2].plot(den,
                 gv.mean(e_sym_pot_eff_s3) + gv.sdev(e_sym_pot_eff_s3),
                 '--k',
                 label='Quadratic fit \n(68% CL)')
    axes[2].plot(den,
                 gv.mean(e_sym_pot_eff_s3) - gv.sdev(e_sym_pot_eff_s3), '--k')
    # axes[2].fill_between (den,gv.mean(e_sym_pot_eff_s3)+gv.sdev(e_sym_pot_eff_s3),gv.mean(e_sym_pot_eff_s3)-gv.sdev(e_sym_pot_eff_s3),label='fit (68% CL)',color='red',alpha=0.8)
    # axes[2].fill_between (den,gv.mean(e_sym_pot_eff_s3)+2*gv.sdev(e_sym_pot_eff_s3),gv.mean(e_sym_pot_eff_s3)-2*gv.sdev(e_sym_pot_eff_s3),label='fit (95% CL)',color='red',alpha=0.2)

    axes[2].set_xlabel('$n$ (fm$^{-3}$)', fontsize='13')
    axes[2].set_ylabel(r'$e_{\mathrm{sym}}^{\mathrm{pot*}}$ (MeV)',
                       fontsize='13')
    axes[2].tick_params(right=True)
    axes[2].legend(loc='upper left', fontsize='11')
    axes[2].tick_params(labelsize='13')
    axes[2].set_xticks(np.arange(0, 0.2 + 0.01, 0.05))
    axes[2].tick_params(right=True)
    axes[2].tick_params(top=True)
    axes[2].tick_params(direction='in')

    plt.tight_layout()
    plt.show()
Example #30
def svd(a, compute_uv=True, rcond=None):
    """ svd decomposition of matrix ``a`` containing |GVar|\s.

    Args:
        a: Two-dimensional matrix/array of numbers
            and/or :class:`gvar.GVar`\s.
        compute_uv (bool): If ``True`` (default), returns
            tuple ``(u,s,vT)`` such that ``a = u @ np.diag(s) @ vT``,
            where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
            and ``vT @ vT.T = 1``, and ``s`` is the list of singular
            values. Only ``s`` is returned if ``compute_uv=False``.
        rcond (float): Singular values whose difference is smaller than
            ``rcond`` times their sum are assumed to be degenerate for
            calculating variances for ``u`` and ``vT``.
            Default (``rcond=None``) is ``max(M,N)`` times machine precision.

    Returns:
        Tuple ``(u,s,vT)`` such that ``a = u @ np.diag(s) @ vT``,
        where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
        and ``vT @ vT.T = 1``, and ``s`` is the list of singular
        values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
        and ``vT.shape=(K,M)`` where ``K`` is the number of
        nonzero singular values (``len(s)==K``).
        If ``compute_uv==False`` only ``s`` is returned.

    Raises:
        ValueError: If matrix is not two-dimensional.
    """
    a = numpy.asarray(a)
    if a.dtype != object:
        return numpy.linalg.svd(a, compute_uv=compute_uv)
    amean = gvar.mean(a)
    if amean.ndim != 2:
        raise ValueError('matrix must have dimension 2: actual shape = ' +
                         str(a.shape))
    if rcond is None:
        rcond = numpy.finfo(float).eps * max(a.shape)
    da = a - amean
    u0, s0, v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True)
    k = min(a.shape)
    s = s0 + [u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k)]
    if compute_uv:
        u = numpy.array(u0, dtype=object)
        vT = numpy.array(v0T, dtype=object)
        # u first
        daaT = da.dot(a.T) + a.dot(da.T)
        s02 = numpy.zeros(daaT.shape[0], float)
        s02[:len(s0)] = s0**2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i] - s02[j]
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                u[:, i] += u0[:, j] * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2
        # v next
        daTa = da.T.dot(a) + a.T.dot(da)
        s02 = numpy.zeros(daTa.shape[0], float)
        s02[:len(s0)] = s0**2
        for j in range(s02.shape[0]):
            for i in range(k):
                if i == j:
                    continue
                ds2 = s02[i] - s02[j]
                if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
                    continue
                vT[i, :] += v0T[j, :] * v0T[j, :].dot(daTa.dot(
                    v0T[i, :])) / ds2
        return u[:, :k], s, vT[:k, :]
    else:
        return s
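A short usage sketch (values invented for illustration):

    import gvar

    a = gvar.gvar([['2.0(1)', '0.5(1)'], ['0.5(1)', '1.0(1)']])
    u, s, vT = svd(a)
    print(s)                       # singular values with propagated errors
    print(gvar.mean(u.T.dot(u)))   # identity, up to rounding
    s = svd(a, compute_uv=False)   # singular values only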
Example #31
 def r_prior(self):
     src_tag = self.ds.tags.src
     m_src = gv.mean(self.prior[f'{src_tag}:dE'][0])
     matrix_element = self.prior['Vnn'][0, 0]
     return convert_vnn_to_ratio(m_src, matrix_element)
Example #32
def wavg(dataseq, prior=None, fast=False, **kargs):
    """ Weighted average of |GVar|\s or arrays/dicts of |GVar|\s.
        
    The weighted average of several |GVar|\s is what one obtains from
    a  least-squares fit of the collection of |GVar|\s to the
    one-parameter fit function ::

        def f(p): 
            return N * [p[0]]

    where ``N`` is the number of |GVar|\s. The average is the best-fit 
    value for ``p[0]``.  |GVar|\s with smaller standard deviations carry 
    more weight than those with larger standard deviations. The averages
    computed by ``wavg`` take account of correlations between the |GVar|\s.

    If ``prior`` is not ``None``, it is added to the list of data 
    used in the average. Thus ``wavg([x2, x3], prior=x1)`` is the 
    same as ``wavg([x1, x2, x3])``. 
        
    Typical usage is ::
        
        x1 = gvar.gvar(...)
        x2 = gvar.gvar(...)
        x3 = gvar.gvar(...)
        xavg = wavg([x1, x2, x3])   # weighted average of x1, x2 and x3
    
    where the result ``xavg`` is a |GVar| containing the weighted average.

    The individual |GVar|\s in the last example can be  replaced by
    multidimensional distributions, represented by arrays of |GVar|\s
    or dictionaries of |GVar|\s (or arrays of |GVar|\s). For example, ::

        x1 = [gvar.gvar(...), gvar.gvar(...)]
        x2 = [gvar.gvar(...), gvar.gvar(...)]
        x3 = [gvar.gvar(...), gvar.gvar(...)]
        xavg = wavg([x1, x2, x3])   
            # xavg[i] is wgtd avg of x1[i], x2[i], x3[i]

    where each array ``x1``, ``x2`` ... must have the same shape. 
    The result ``xavg`` in this case is an array of |GVar|\s, where 
    the shape of the array is the same as that of ``x1``, etc.

    Another example is ::

        x1 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x2 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x3 = dict(a=[gvar.gvar(...), gvar.gvar(...)])
        xavg = wavg([x1, x2, x3])   
            # xavg['a'][i] is wgtd avg of x1['a'][i], x2['a'][i], x3['a'][i]
            # xavg['b'] is wgtd avg of x1['b'], x2['b']

    where different dictionaries can have (some) different keys. Here the
    result ``xavg`` is a :class:`gvar.BufferDict` having the same keys as
    ``x1``, etc.
     
    Weighted averages can become costly when the number of random samples being 
    averaged is large (100s or more). In such cases it might be useful to set
    parameter ``fast=True``. This causes ``wavg`` to estimate the weighted 
    average by incorporating the random samples one at a time into a 
    running average::

        result = prior
        for dataseq_i in dataseq:
            result = wavg([result, dataseq_i], ...)

    This method is much faster when ``len(dataseq)`` is large, and gives the
    exact result when there are no correlations between different elements
    of list ``dataseq``. The results are approximately correct when 
    ``dataseq[i]`` and ``dataseq[j]`` are correlated for ``i!=j``.

    :param dataseq: The |GVar|\s to be averaged. ``dataseq`` is a one-dimensional
        sequence of |GVar|\s, or of arrays of |GVar|\s, or of dictionaries 
        containing |GVar|\s or arrays of |GVar|\s. All ``dataseq[i]`` must
        have the same shape.
    :param prior: Prior values for the averages, to be included in the weighted
        average. Default value is ``None``, in which case ``prior`` is ignored.
    :type prior: |GVar| or array/dictionary of |GVar|\s
    :param fast: Setting ``fast=True`` causes ``wavg`` to compute an 
        approximation to the weighted average that is much faster to calculate 
        when averaging a large number of samples (100s or more). The default is 
        ``fast=False``.
    :type fast: bool 
    :param kargs: Additional arguments (e.g., ``svdcut``) to the fitter 
        used to do the averaging.
    :type kargs: dict
        
    Results returned by :func:`gvar.wavg` have the following extra 
    attributes describing the average:
        
    .. attribute:: chi2
        
        ``chi**2`` for weighted average.
        
    .. attribute:: dof
        
        Effective number of degrees of freedom.
        
    .. attribute:: Q

        Quality factor (or *p-value*) for the fit: the probability that
        the ``chi**2`` could have been larger, by chance, assuming that
        the data are all Gaussian and consistent with each other. Values
        smaller than 0.1 suggest that the data are not Gaussian or are
        inconsistent with each other.

    .. attribute:: time

        Time required to do average.

    .. attribute:: svdcorrection

        The *svd* corrections made to the data when ``svdcut`` is not ``None``.

    .. attribute:: fit

        Fit output from average.
    """
    if len(dataseq) <= 0:
        if prior is None:
            return None 
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(prior, 'keys'):
            return BufferDictWAvg(prior, wavg)
        if numpy.shape(prior) == ():
            return GVarWAvg(prior, wavg)
        else:
            return ArrayWAvg(numpy.asarray(prior), wavg)        
    elif len(dataseq) == 1 and prior is None:
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(dataseq[0], 'keys'):
            return BufferDictWAvg(dataseq[0], wavg)
        if numpy.shape(dataseq[0]) == ():
            return GVarWAvg(dataseq[0], wavg)
        else:
            return ArrayWAvg(numpy.asarray(dataseq[0]), wavg)
    if fast:
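        # running average: fold the samples in one at a time (see docstring)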
        chi2 = 0
        dof = 0
        time = 0
        ans = prior
        svdcorrection = gvar.BufferDict()
        for i, dataseq_i in enumerate(dataseq):
            if ans is None:
                ans = dataseq_i
            else:
                ans = wavg([ans, dataseq_i], fast=False, **kargs)
                chi2 += wavg.chi2
                dof += wavg.dof
                time += wavg.time
                if wavg.svdcorrection is not None:
                    for k in wavg.svdcorrection:
                        svdcorrection[str(i) + ':' + k] = wavg.svdcorrection[k]
        wavg.chi2 = chi2
        wavg.dof = dof
        wavg.time = time
        wavg.Q = gammaQ(dof / 2., chi2 / 2.)
        wavg.svdcorrection = svdcorrection
        wavg.fit = None
        ans.dof = wavg.dof
        ans.Q = wavg.Q
        ans.chi2 = wavg.chi2
        ans.time = wavg.time
        ans.svdcorrection = wavg.svdcorrection
        ans.fit = wavg.fit
        return ans
    if hasattr(dataseq[0], 'keys'):
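        # dictionary-valued samples: collect, key by key, the lists of
        # values to be averaged (different samples may supply different keys)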
        data = {}
        keys = []
        if prior is not None:
            dataseq = [prior] + list(dataseq)
        for dataseq_i in dataseq:
            for k in dataseq_i:
                if k in data:
                    data[k].append(dataseq_i[k])
                else:
                    data[k] = [dataseq_i[k]]
                    keys.append(k)
        data = gvar.BufferDict(data, keys=keys)
        p0 = gvar.BufferDict()
        for k in data:
            p0[k] = gvar.mean(data[k][0]) + gvar.sdev(data[k][0]) / 10.
        def fcn(p):
            ans = gvar.BufferDict()
            for k in data:
                ans[k] = len(data[k]) * [p[k]]
            return ans
    else:
        p = numpy.asarray(dataseq[0])
        data = [] if prior is None else [prior]
        data += [dataseqi for dataseqi in dataseq]
        p0 = numpy.asarray(gvar.mean(data[0]) + gvar.sdev(data[0]) / 10.)
        data = numpy.array(data)
        def fcn(p):
            return len(data) * [p]
    fit = lsqfit.nonlinear_fit(data=data, fcn=fcn, p0=p0, **kargs)
    # wavg.Q = fit.Q
    # wavg.chi2 = fit.chi2
    # wavg.dof = fit.dof
    # wavg.time = fit.time
    # wavg.svdcorrection = fit.svdcorrection
    # wavg.fit = fit
    if p0.shape is None:
        return BufferDictWAvg(gvar.BufferDict(p0, buf=fit.p.flat), fit)
    elif p0.shape == ():
        return GVarWAvg(fit.p.flat[0], fit)
    else:
        return ArrayWAvg(fit.p.reshape(p0.shape), fit)
Example #33
0
def _correlate(data, **kwargs):
    """Correlates the data, including correction of covariance matrix."""
    mean = gv.mean(gv.dataset.avg_data(data))
    cov = correct_covariance(data, **kwargs)
    return gv.gvar(mean, cov)
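
# A minimal usage sketch (hypothetical input; ``correct_covariance`` is
# defined elsewhere in the same module):
#   samples = gv.dataset.Dataset('mc_samples.txt')   # hypothetical file
#   corr = _correlate(samples)
#   print(gv.mean(corr), gv.sdev(corr))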
Example #34
0
def plot_crust_core_transition_plot():

    density = np.arange(2,16)
    density = density * 0.01
    d1 = []
    d2 = []
    
    for i in density:
        d1.append(delta_beta(i, model_par_1))
        d2.append(delta_beta(i, model_par_2))
       
       
    t1 = []
    t2 = []
    delta = np.arange(0,99)
    delta = delta*0.01
    
    for i in delta:
        t1.append(transition(i, model_par_1))
        t2.append(transition(i, model_par_2))
        
    
    fig, ax = plt.subplots(figsize=(7,5))
    
    ax.fill_between(density, gv.mean(d1) + gv.sdev(d1), gv.mean(d1) - gv.sdev(d1), label=r'$\delta^2$ only', color='red', alpha=0.4)
    ax.fill_between(density, gv.mean(d2) + gv.sdev(d2), gv.mean(d2) - gv.sdev(d2), label=r'$\delta^2 + \delta^4$', color='blue', alpha=0.4)

    ax.fill_betweenx(delta, gv.mean(t1) + gv.sdev(t1), gv.mean(t1) - gv.sdev(t1), color='red', alpha=0.4)
    ax.fill_betweenx(delta, gv.mean(t2) + gv.sdev(t2), gv.mean(t2) - gv.sdev(t2), color='blue', alpha=0.4)
    
    plt.legend()
    plt.xlabel(r'$n$ (fm$^{-3}$)', fontsize='15')
    plt.ylabel(r'$ \delta$', fontsize='15')
    plt.text(0.12, 0.85, r'$\beta$-equilibrium ',fontsize='14')
    plt.text(0.109, 0.314, r'spinodal',fontsize='14')
    ax.tick_params(labelsize='14')
    ax.tick_params(right=True)
    ax.tick_params(top=True)
    ax.tick_params(direction='in')
    ax.legend(loc = 'lower left',fontsize='13.0')
    
    ## Inset
    
    density_inset = np.arange(5,13)
    density_inset = density_inset * 0.01
    d1_inset = []
    d2_inset = []
    
    for i in density_inset:
        d1_inset.append(delta_beta(i, model_par_1))
        d2_inset.append(delta_beta(i, model_par_2))
       
       
    t1_inset = []
    t2_inset = []
    delta_inset = np.arange(90,99)
    delta_inset = delta_inset*0.01
    
    for i in delta_inset:
        t1_inset.append(transition(i, model_par_1))
        t2_inset.append(transition(i, model_par_2))
        
        
    left, bottom, width, height = [0.2, 0.45, 0.33, 0.41]
    
    ax2 = fig.add_axes([left, bottom, width, height])
    
    ax2.plot(gv.mean(transition_density(model_par_1)), gv.mean(delta_beta(transition_density(model_par_1), model_par_1)), 'rs')
    ax2.plot(gv.mean(transition_density(model_par_2)), gv.mean(delta_beta(transition_density(model_par_2), model_par_2)), 'bs')
    
    
    ax2.fill_between(density_inset, gv.mean(d1_inset) + gv.sdev(d1_inset), gv.mean(d1_inset) - gv.sdev(d1_inset), color='red', alpha=0.2)
    ax2.fill_between(density_inset, gv.mean(d2_inset) + gv.sdev(d2_inset), gv.mean(d2_inset) - gv.sdev(d2_inset), color='blue', alpha=0.2)

    ax2.fill_betweenx(delta_inset, gv.mean(t1_inset) + gv.sdev(t1_inset), gv.mean(t1_inset) - gv.sdev(t1_inset), color='red', alpha=0.4)
    ax2.fill_betweenx(delta_inset, gv.mean(t2_inset) + gv.sdev(t2_inset), gv.mean(t2_inset) - gv.sdev(t2_inset), color='blue', alpha=0.4)
    
    ax2.set_xlim(0.06, 0.12)
    ax2.set_ylim(0.9, 0.98)
    
    ax2.tick_params(labelsize='11')
    ax2.tick_params(right=True)
    ax2.tick_params(top=True)
    ax2.tick_params(direction='in')
    
    plt.tight_layout()
    fig.show()
Example #35
0
"""

import lsqfitgp as lgp
from matplotlib import pyplot as plt
import numpy as np
import gvar

xdata = np.linspace(0, 1, 10)
xpred = np.linspace(0, 1, 500)
y = np.ones_like(xdata)

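# condition a GP (periodic Zeta kernel, nu=1.5) on constant data y = 1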
gp = lgp.GP(lgp.Zeta(nu=1.5))
gp.addx(xdata, 'pere')
gp.addx(xpred, 'banane')

u = gp.predfromdata({'pere': y}, 'banane')
m = gvar.mean(u)
s = gvar.sdev(u)
cov = gvar.evalcov(u)

fig, ax = plt.subplots(num='y', clear=True)

patch = ax.fill_between(xpred, m - s, m + s, label='pred', alpha=0.5)
color = patch.get_facecolor()[0]
simulated_lines = np.random.multivariate_normal(m, cov, size=10)
ax.plot(xpred, simulated_lines.T, '-', color=color)
ax.plot(xdata, y, 'kx', label='data')
ax.legend(loc='best')

fig.show()
Example #36
0
    def fitfcn(self, p, fit_data=None, xi=None, debug=None):
        if debug:
            self.debug = debug
            self.debug_table = {}

        if fit_data is not None:
            for key in fit_data.keys():
                p[key] = fit_data[key]

        for key in self.model_info['exclude']:
            p[key] = 0

        # Variables
        if xi is None:
            xi = {}
        if 'l' not in xi:
            xi['l'] = (p['mpi'] / p['lam_chi'])**2
        if 's' not in xi:
            xi['s'] = (2 * p['mk']**2 - p['mpi']**2) / p['lam_chi']**2
        if 'a' not in xi:
            xi['a'] = p['eps2_a']  #p['a/w']**2 / 4

        # lo
        output = p['c0']

        if self.debug:
            self.debug_table['lo_ct'] = output

        # nlo
        if self.model_info['order'] in ['nlo', 'n2lo', 'n3lo']:
            output += self.fitfcn_nlo_ct(p, xi)
            if self.model_info['include_alphas']:
                output += self.fitfcn_nlo_latt_alphas(p, xi)

        elif self.model_info['latt_ct'] in ['nlo', 'n2lo', 'n3lo']:
            output += self.fitfcn_nlo_latt_ct(p, xi)
            if self.model_info['include_alphas']:
                output += self.fitfcn_nlo_latt_alphas(p, xi)

        # n2lo
        if self.model_info['order'] in ['n2lo', 'n3lo']:
            output += self.fitfcn_n2lo_ct(p, xi)
            if self.model_info['include_log']:
                output += self.fitfcn_n2lo_log(p, xi)

        elif self.model_info['latt_ct'] in ['n2lo', 'n3lo']:
            output += self.fitfcn_n2lo_latt_ct(p, xi)

        # n3lo
        if self.model_info['order'] in ['n3lo']:
            output += self.fitfcn_n3lo_ct(p, xi)
            if self.model_info['include_log']:
                output += self.fitfcn_n3lo_log(p, xi)
            if self.model_info['include_log2']:
                output += self.fitfcn_n3lo_log_sq(p, xi)

        elif self.model_info['latt_ct'] in ['n3lo']:
            output += self.fitfcn_n3lo_latt_ct(p, xi)

        for key in self.model_info['exclude']:
            del (p[key])

        if debug:
            #print(gv.tabulate(self.debug_table))
            temp_string = ''
            for key in self.debug_table:
                temp_string += '  % .15f:  %s\n' % (gv.mean(
                    self.debug_table[key]), key)
            temp_string += '   -----\n'
            temp_string += '  % .15f:  %s\n' % (gv.mean(output), 'total')
            print(temp_string)

        return output
Example #37
0

def makegp(params):
    kernel_time = lgp.ExpQuad(scale=params['time_scale'], dim='time')
    kernel_label = lgp.ExpQuad(scale=label_scale, dim='label')
    gp = lgp.GP(kernel_time * kernel_label)
    gp.addx(x, 'data', deriv=(data_deriv, 'time'))
    gp.addx(np.array([(0, 0)], dtype=x.dtype), 'fixed_point')
    return gp


prior = {'log(time_scale)': gvar.log(gvar.gvar(3, 2))}
datadict = {'data': data, 'fixed_point': [gvar.gvar(0, 1e2)]}
params = lgp.empbayes_fit(prior, makegp, datadict, raises=False, jit=True).p
print('time_scale:', params['time_scale'])
gp = makegp(gvar.mean(params))

time_pred = np.linspace(-10, 10, 100)
xpred = np.empty((2, len(time_pred)), dtype=x.dtype)
xpred['time'] = time_pred
xpred['label'][0] = 0
xpred['label'][1] = 1
gp.addx(xpred[0], 0)
gp.addx(xpred[1], 1, deriv=(1, 'time'))

pred = gp.predfromdata(datadict, [0, 1])

fig, ax = plt.subplots(num='u', clear=True)

colors = dict()
for deriv in pred:
Example #38
0
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,5):
        print '************************************* nexp =',nexp
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0)
        print fit                   # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    sys_stdout = sys.stdout
    if DO_ERRORBUDGET:

        lines = [
            "E = fit.p['E']",
            "a = fit.p['a']",
            "print(E[1] / E[0])",
            "print((E[1] / E[0]).partialsdev(fit.prior['E']))",
            "print((E[1] / E[0]).partialsdev(fit.prior['a']))",
            "print((E[1] / E[0]).partialsdev(y))"
            ]
        sys.stdout = tee.tee(sys_stdout, open("eg4c.out","w"))
        for line in lines:
            print ">>>", line
            if line[:5] == "print":
                print(eval(line[5:]))
        # print E[1]/E[0]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(y)
        outputs = {'E1/E0':E[1]/E[0], 'E2/E0':E[2]/E[0],
                 'a1/a0':a[1]/a[0], 'a2/a0':a[2]/a[0]}
        inputs = {'E':fit.prior['E'],'a':fit.prior['a'],'y':y}

        sys.stdout = tee.tee(sys_stdout, open("eg4b.out","w"))
        print fit.fmt_values(outputs)
        print fit.fmt_errorbudget(outputs,inputs)
        sys.stdout = sys_stdout

    if DO_SIMULATIONS:
        # fit simulations
        sys.stdout = tee.tee(sys_stdout, open("eg4d.out","w"))

        for sfit in fit.simulated_fit_iter(3):
            print '************************************* simulation'
            print(sfit)
            sE = sfit.p['E']             # best-fit parameters
            sa = sfit.p['a']
            E = sfit.pexact['E']
            a = sfit.pexact['a']
            print 'E1/E0 =', sE[1] / sE[0], '  E2/E0 =', sE[2] / sE[0]
            print 'a1/a0 =', sa[1] / sa[0], '  a2/a0 =', sa[2] / sa[0]
            print '\nSimulated Fit Values - Exact Values:'
            print 'E1/E0:', (sE[1] / sE[0]) - (E[1] / E[0]),\
               '  E2/E0:', (sE[2] / sE[0]) - (E[2] / E[0])
            print 'a1/a0:', (sa[1] / sa[0]) - (a[1] / a[0]),\
               '  a2/a0:', (sa[2] / sa[0]) - (a[2] / a[0])

            # compute chi**2 comparing fit results to exact results
            sim_results = [sE[0], sE[1], sa[0], sa[1]]
            exact_results = [E[0], E[1], a[0], a[1]]
            chi2 = gv.chi2(sim_results, exact_results, svdcut=1e-8)
            print '\nParameter chi2/dof [dof] = %.2f' % (chi2/chi2.dof), '[%d]' % chi2.dof, '  Q = %.1f' % chi2.Q
            print
        sys.stdout = sys_stdout

    if DO_EMPBAYES:
        def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5,0.5*z[0]) for i in range(nexp)]
            return dict(prior=prior,data=data,fcn=f,p0=p0)
        ##
        z0 = [0.0]
        fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4a.out","w"))
        print fit                   # print the optimized fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        # print "prior['a'] =",fit.prior['a'][0]
        sys.stdout = sys_stdout
        print

    if DO_PLOT:
        import pylab as pp
        from gvar import mean,sdev
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #39
0
NM3_par = fit.p



############ E_sym #####################

den = np.arange(0.001, 0.2, 0.01)


e_sym_s3 = f_NM(den,NM3_par) - f_SM(den,SM3_par)
data_esym = te_NM_av - te_SM_av


fig, axes = plt.subplots(1, 3, figsize=(11, 4), sharey='row')

axes[0].errorbar(td, gv.mean(data_esym), gv.sdev(data_esym), fmt='ob', label='data (68% CL)')
axes[0].fill_between(den, gv.mean(e_sym_s3) + gv.sdev(e_sym_s3), gv.mean(e_sym_s3) - gv.sdev(e_sym_s3), label='fit (68% CL)', color='red', alpha=0.8)
axes[0].fill_between(den, gv.mean(e_sym_s3) + 2 * gv.sdev(e_sym_s3), gv.mean(e_sym_s3) - 2 * gv.sdev(e_sym_s3), label='fit (95% CL)', color='red', alpha=0.2)

axes[0].set_xlabel(r'$n$ (fm$^{-3}$)', fontsize='13')
axes[0].set_ylabel(r'$e_{\mathrm{sym}}$ (MeV)', fontsize='13')
axes[0].tick_params(labelsize='13')
axes[0].set_xticks(np.arange(0, 0.2 + 0.01, 0.05))
axes[0].tick_params(right=True)
axes[0].tick_params(top=True)
axes[0].tick_params(direction='in')

############ E_sym^{pot} #####################

den = np.arange(0.001, 0.2, 0.01)
Example #40
0
    if n in [0, 9]:
        print('******* Results from ', (n + 1) * nfit, 'data points')
        print(fit)
print('******* Results from ', Nstride * nfit, 'data points (final)')
print(fit)

sys.stdout = sys_stdout

print('fit time =', fit_time, '   # of points =', Nstride * nfit)

if False:
    print()
    sys.stdout = tee.tee(sys_stdout, open('eg9b.out', 'w'))

    prior = gv.gvar(['0(1)', '0(1)', '0(1)'])
    y = gv.gvar(gv.mean(y), gv.sdev(y))

    fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=fcn)
    print('******* Results from uncorrelated fit')
    print(fit)
"""
******* Results from  2000 data points (final)
Least Square Fit:
  chi2/dof [dof] = 1 [2000]    Q = 0.079    logGBF = 10908

Parameters:
              0    0.519 (10)     [  0.0 (1.0) ]
              1   0.4005 (18)     [  0.0 (1.0) ]
              2   0.6978 (51)     [  0.0 (1.0) ]

Settings:
Example #41
0
def static_potential(size_list=[8, 8, 8, 8],
                     cfgs_list=range(200, 1200, 50),
                     max_R=4.1,
                     max_T=5,
                     ens_tag='',
                     smear_tag='',
                     do_smear=True,
                     smearing_dict={'u0': 0.84},
                     figname='',
                     **lat_kwargs):
    """
    This function computes the static potential from Monte Carlo averages of `R times T` Wilson loops
    for different values of `R` and `T`.

    This corresponds to the exercise on page 37 of the `lecture notes`_.
    Here we smear the links first and then calculate the Wilson loops.
    One can simply disable the smearing by setting the option `do_smear=False`.
    Note that one can first run :meth:`gauge_tools.examples.ape_smear` to smear the links, save them, and then use them here.
    With a combination of the options `ens_tag` and `smear_tag` one can specify the links to be read,
    and with the option `do_smear` one can request an in-place smearing. In this case, one can control the
    smearing parameters with `smearing_dict`. In-place smearing is the default (`do_smear=True`).
    For the exercise simply use::
       
                >>> import gauge_tools as gt
                >>> gt.examples.static_potential(cfgs_list=range(200,1200,50), ens_tag='W_', figname='static_W_smear4.pdf')

    Parameters:
        - ``size_list``     ([int]*4):  a list of 4 positive integers specifying the lattice size in `[x,y,z,t]` directions.
        - ``cfgs_list``     (list):     a list of indices of configurations to be loaded for further calculations.
        - ``ens_tag``       (str):      a unique tag (label) describing the ensemble:\
                    for details see the parameters of :meth:`gauge_tools.examples.generate_ensemble`.
        - ``smear_tag``     (str):      useful if one is going to use the already saved smeared links;\
                    for details see the parameters of :meth:`gauge_tools.examples.ape_smear`.
                                        Ignore this if the smearing is going to be done in place.
        - ``do_smear``      (bool):     For smearing the links before calculating the Wilson loops;\
                                        the default value is `True`.
        - ``smearing_dict`` (dict):     a dictionary to control the smearing parameters;\
                                        for available options see :meth:`gauge_tools.examples.ape_smear`.
        - ``max_R``         (float):    calculates the static potential for distances `\le max_R`.
        - ``max_T``         (int):      calculates the static potential for all times `\le max_T`.
        - ``figname``       (str):      if not an empty string, creates a figure and saves it as a pdf in `figname`.
 
    """
    import gauge_tools as gt
    lat = gt.lattice(*size_list, **lat_kwargs)
    #===============
    fname_load = lambda n_cfg: "{}{}{}.npy".format(ens_tag, smear_tag, n_cfg)
    fname_save = "{}{}{}.p".format(ens_tag, smear_tag, 'Wilson_loops')
    #===============
    dset = []
    range_T = range(1, 1 + max_T)
    range_R, spatial_paths = define_paths(max_R)
    func = lambda U: [
        lat.meas.avg_RxT_loops(U, path_R, max_T) for path_R in spatial_paths
    ]
    #===============
    for n_cfg in cfgs_list:
        T1 = time.time()
        U = lat.GF.load_cfg(fname_load(n_cfg))
        if do_smear:
            lat.smear.ape_smear(
                U, **smearing_dict)  # the smearing is performed on `U`
        dset.append(func(U))
        print(" RxT is evaluated for cfg={} (#TIME = {:.4g})".format(
            n_cfg,
            time.time() - T1))
    W = gv.dataset.avg_data(dset)
    pickle.dump(dict(r=range_R, Wmean=gv.mean(W), Wcov=gv.evalcov(W)),
                open(fname_save, 'wb'))
    if figname != '' and PLOTS:
        V = gv.log(W[:, :-1] / np.roll(W, -1, axis=1)[:, :-1])
        plot_static_potential(range_R, V, figname=figname)
    static_potential.lat = lat
Example #42
0
#### GP OBJECT ####

gp = lgp.GP()

gp.addproc(kernel, 'h')
gp.addproctransf({'h': 1}, 'primitive', deriv='x'     )
gp.addproctransf({'h': 1}, 'f'        , deriv=(2, 'x'))
gp.addproctransf({
    'primitive': lambda x: x['x'],
    'h'        : -1,
}, 'primitive of xf(x)')

gp.addx(xdata, 'xdata', proc='f')

# linear data (used for warmup fit)
gp.addtransf({'xdata': M(gvar.mean(Mparams))}, 'data', axes=2)

# total momentum rule
gp.addx(xinteg, 'xmomrule', proc='primitive of xf(x)')
gp.addtransf({'xmomrule': suminteg}, 'momrule', axes=2)

# quark sum rules
qdiff = np.array([1, -1])[:, None] # vector to subtract two quarks
for quark in 'ducs':
    idx = indices[quark] # [quark, antiquark] indices
    label = f'{quark}{quark}bar' # the one appearing in `constraints`
    xlabel = f'x{label}'
    gp.addx(xinteg[idx], xlabel, proc='primitive')
    gp.addtransf({xlabel: suminteg[idx] * qdiff}, label, axes=2)
    
#### NONLINEAR FUNCTION ####
Example #43
0
gp = lgp.GP(lgp.Zeta(nu=2.5),
            checkpos=False)  # TODO is this checkpos necessary
gp.addkernelop('fourier', True, 'F')
x = np.linspace(0, 1, 100)
gp.addx(x, 'x')
gp.addx(1, 's1', proc='F')
gp.addx(2, 'c1', proc='F')

comb = [
    [0, 0],
    [1, 0],
    [0, 1],
    [1, 1],
]

fig, ax = plt.subplots(num='fourier', clear=True)

for s, c in comb:
    y = gp.predfromdata(dict(s1=s, c1=c), 'x')
    m = gvar.mean(y)
    u = gvar.sdev(y)
    pc = ax.fill_between(x, m - u, m + u, alpha=0.5, label=f's{s}c{c}')
    color = pc.get_facecolor()
    for sample in gvar.raniter(y, 3):
        ax.plot(x, sample, color=color)

ax.legend()

fig.show()
Example #44
0
data = list()
for tau in taus:
    t = [sets[tau][x][:, 0] for x in sets[tau]]
    R = [sets[tau][x][:, 1] for x in sets[tau]]
    tp, yp = interpolate(t, R, xp=tau * 6.01 / 5)
    ave = yp.mean(axis=0)
    err = yp.std(axis=0) / np.sqrt(yp.shape[0])
    data.append((tau, ave, err))
x, ave, err = np.array(list(zip(*data)))
m, _, _ = ax.errorbar(x, ave, err, ls="", marker="o")
fit = lsqfit.nonlinear_fit(data=(x, gv.gvar(ave, err)),
                           fcn=lambda x, p: p[0] * (x + p[1]),
                           p0=[1, 1])
x = np.linspace(0, max(x) + 3)
y = fit.p[0] * (x + fit.p[1])
ax.plot(x, gv.mean(y), ls="-", color=m.get_color())
ax.fill_between(x,
                gv.mean(y) + gv.sdev(y),
                gv.mean(y) - gv.sdev(y),
                color=m.get_color(),
                alpha=0.2)
#x = np.linspace(0, max(x)+1)
#y = fit.p[0]*(x+fit.p[1])
#ax.plot(x, gv.mean(y), ls="--", color=m.get_color())
if True:
    tau_i = R0_target / fit.p[0] - fit.p[1]
    a, e = gv.mean(tau_i), gv.sdev(tau_i)
    ax.plot([a] * 2, [0, R0_target], ls="--", color="k")
    ax.plot([0, a], [R0_target] * 2, ls="--", color="k")
    ax.fill_betweenx([0, R0_target], [a - e] * 2, [a + e] * 2,
                     color="k",
Example #45
0
def lstsq(a, b, rcond=None, weighted=False, extrainfo=False):
    """ Least-squares solution ``x`` to ``a @ x = b`` for |GVar|\s.

    Here ``x`` is defined to be the solution that minimizes ``||b - a @ x||``.
    If ``b`` has a covariance matrix, another option is to weight the
    norm with the inverse covariance matrix: i.e., minimize
    ``|| isig @ b - isig @ a @ x||`` where ``isig`` is the square root of the
    inverse of ``b``'s covariance matrix. Set parameter ``weighted=True`` to
    obtain the weighted-least-squares solution.

    Args:
        a : Matrix/array of shape ``(M,N)`` containing numbers and/or |GVar|\s.
        b : Vector/array of shape ``(M,)`` containing numbers and/or |GVar|\s.
        rcond (float): Cutoff for small singular values of ``a``. Modes whose
            squared singular value (eigenvalue of ``a.T @ a``) is smaller than
            ``rcond`` times the largest such eigenvalue are dropped. Default
            (``rcond=None``) is ``max(M,N)`` times machine precision.
        weighted (bool): If ``True``, use weighted least squares; otherwise
            use unweighted least squares.
        extrainfo (bool): If ``False`` (default) only ``x`` is returned;
            otherwise ``(x, residual, rank, s)`` is returned.
    Returns:
        Array ``x`` of shape ``(N,)`` that minimizes ``|| b - a @ x||``
        if ``extrainfo==False`` (default); otherwise returns a tuple
        ``(x, residual, rank, s)`` where ``residual`` is the sum
        of the squares of ``b - a @ x``, ``rank`` is the rank of matrix
        ``a``, and ``s`` is an array containing the singular values.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    if a.ndim != 2:
        raise ValueError('a must have dimension 2: actual shape = ' +
                         str(a.shape))
    if a.shape[0] != b.shape[0]:
        raise ValueError('a and b shapes mismatched: {} vs {}'.format(
            a.shape, b.shape))
    if rcond is None:
        rcond = numpy.finfo(float).eps * max(a.shape)
    if weighted:
        try:
            cov = gvar.evalcov(b)
        except ValueError:
            raise ValueError('b does not have a covariance matrix')
        try:
            icov = numpy.linalg.inv(cov)
        except numpy.linalg.LinAlgError:
            raise ValueError("b's covariance matrix cannot be inverted")
        ata = a.T.dot(icov.dot(a))
        atb = a.T.dot(icov.dot(b))
    else:
        ata = a.T.dot(a)
        atb = a.T.dot(b)
    val, vec = gvar.linalg.eigh(ata)
    maxval = numpy.max(gvar.mean(val))  # N.B. val > 0 required
    ans = 0
    for i in range(len(val)):
        if gvar.mean(val[i]) < rcond * maxval:
            continue
        ans += vec[:, i] * vec[:, i].dot(atb) / val[i]
    if not extrainfo:
        return ans
    val = val[val >= rcond * maxval]**0.5
    d = a.dot(ans) - b
    residual = d.dot(icov.dot(d)) if weighted else d.dot(d)
    k = len(val)
    return ans, residual, k, val
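
# A minimal usage sketch (illustrative numbers, not from the original source;
# assumes numpy and gvar are imported as in the surrounding module):
# weighted straight-line fit y = x[0] + x[1] * t to three measurements.
if __name__ == '__main__':
    a = numpy.asarray([[1., 0.], [1., 1.], [1., 2.]])
    b = gvar.gvar(['0.9(1)', '2.1(1)', '2.9(1)'])
    print(lstsq(a, b, weighted=True))   # two GVars: intercept and slope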
Example #46
0
x = xdata['x']
y = pred['xdata']
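# trapezoidal-rule estimates of the integrals in the sum rules checked below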
checksum = np.sum((y[:, 1:] + y[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx   f_i(x) =', checksum)
checksum = np.sum(((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx x f_i(x) =', checksum)

#### PLOT RESULTS ####

fig, axs = plt.subplots(1, 2, num='pdf2', clear=True, figsize=[9, 4.5])
axs[0].set_title('PDFs')
axs[1].set_title('Data')

for i in range(len(xdata)):
    y = pred['xdata'][i]
    m = gvar.mean(y)
    s = gvar.sdev(y)
    axs[0].fill_between(xdata[i]['x'],
                        m - s,
                        m + s,
                        alpha=0.6,
                        facecolor=f'C{i}')
    y2 = priorsample['xdata'][i]
    axs[0].plot(xdata[i]['x'], y2, color=f'C{i}')

m = gvar.mean(pred['data'])
s = gvar.sdev(pred['data'])
x = np.arange(len(data))
axs[1].fill_between(x, m - s, m + s, step='mid', color='lightgray')
axs[1].errorbar(x, datamean, dataerr, color='black', linestyle='', capsize=2)
Example #47
0
with model:
    mp = pm.find_MAP()
    trace = pm.sample(10000, cores=1)

print('\nMaximum a posteriori (must be the same as lsqfitgp):')
print('log(sdev) {:.2f}'.format(mp['logsdev']))
print('log(scale) {:.2f}'.format(mp['logscale']))

df = pm.trace_to_dataframe(trace)
mean = df.mean()
cov = df.cov()

meandict = {}
covdict = {}
for label1 in df.columns:
    meandict[label1] = mean[label1]
    for label2 in df.columns:
        covdict[label1, label2] = cov[label1][label2]

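# package the posterior mean vector and covariance matrix as correlated GVars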
params = gvar.gvar(meandict, covdict)
print('\nPosterior mean and standard deviation:')
print('log(sdev)', params['logsdev'])
print('log(scale)', params['logscale'])

p = params['logsdev']
prob_gauss = stats.norm.cdf(np.log(1), loc=gvar.mean(p), scale=gvar.sdev(p))
true_prob = np.sum(df['logsdev'] <= np.log(1)) / len(df)
print('\nProbability of having sdev < 1:')
print('prob_gauss {:.3g}'.format(prob_gauss))
print('true_prob {:.3g}'.format(true_prob))
Example #48
0
e_symnq_eta_res = e_sym_res - f_esym2_c(td,e_sym2_eta_par)
e_symnq_eta_data = te_NM_av - te_SM_av - e_sym2_eta_av


fig, axes = plt.subplots(1, 3, figsize=(11, 4), sharey='row')

for h in range(6):
    if h==5:
        axes[0].plot(td,e_NM[:,h] - e_sat[:,h] -e_sym2[:,h],color='C'+str(h) ,label='H'+str(h+2))
    else:
        axes[0].plot(td,e_NM[:,h] - e_sat[:,h] -e_sym2[:,h],color='C'+str(h) ,label='H'+str(h+1))

# axes[0].plot (td , gv.mean(e_symnq_data) , 'bs',label='Data (delta)')       # without error-bars
# axes[0].plot (td , gv.mean(e_symnq_eta_data) , 'rs',label='Data (eta)')
axes[0].errorbar(td, gv.mean(e_symnq_data), gv.sdev(e_symnq_data), fmt='ob')
axes[0].errorbar(td, gv.mean(e_symnq_eta_data), gv.sdev(e_symnq_eta_data), fmt='or')
axes[0].fill_between(td, gv.mean(e_symnq_res) + gv.sdev(e_symnq_res), gv.mean(e_symnq_res) - gv.sdev(e_symnq_res), color='blue', alpha=0.1)
axes[0].fill_between(td, gv.mean(e_symnq_eta_res) + gv.sdev(e_symnq_eta_res), gv.mean(e_symnq_eta_res) - gv.sdev(e_symnq_eta_res), color='red', alpha=0.2)
axes[0].axhline(color='black')

axes[0].set_xlabel(r'$n$ (fm$^{-3}$)', fontsize='13')
axes[0].set_ylabel(r'$e_{\mathrm{sym,nq}}$ (MeV)', fontsize='13')
axes[0].tick_params(labelsize='13')
axes[0].tick_params(right=True)
axes[0].tick_params(top=True)
axes[0].tick_params(direction='in')
axes[0].legend(loc='upper left')

## e_symnq_pot
Example #49
0
def axvspan(ax, x, **kwargs):
    """Wrapper to plot gvars using matplotlib function axvspan."""
    mean = gv.mean(x)
    err = gv.sdev(x)
    ax.axvspan(mean - err, mean + err, **kwargs)
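
# Illustrative usage (hypothetical values): shade a 1-sigma band around x.
#   fig, ax = plt.subplots()
#   axvspan(ax, gv.gvar(0.16, 0.01), color='gray', alpha=0.3)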
Example #50
0
def fit_pot_eff_meanmass(den, delta):
    ans = f_SM(den, SM3_par) - gv.mean(T_SM_eff(den)) + (f_esym2_c(den, e_sym2_par) - gv.mean(T_2_eff(den))) * delta**2
    ans = ans + (f_NM(den, NM3_par) - gv.mean(T_NM_eff(den)) - f_SM(den, SM3_par) + gv.mean(T_SM_eff(den)) - f_esym2_c(den, e_sym2_par) + gv.mean(T_2_eff(den))) * delta**4
    return ans
Example #51
0
def main():
    if not hasattr(lsqfit, 'BayesIntegrator'):
        # fake the run so that `make run` still works
        outfile = open('bayes.out', 'r').read()
        print(outfile[:-1])
        return
    x, y = make_data()
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(prior=prior, data=(x, y), fcn=fcn)
    print(fit)
    # Bayesian integrator
    expval = lsqfit.BayesIntegrator(fit, sync_ran=False)

    # adapt integrator expval to PDF from fit
    neval = 1000
    nitn = 10
    expval(neval=neval, nitn=nitn)

    # <g(p)> gives mean and covariance matrix, and counts for histograms
    hist = [
        gv.PDFHistogram(fit.p[0]),
        gv.PDFHistogram(fit.p[1]),
        gv.PDFHistogram(fit.p[2]),
        gv.PDFHistogram(fit.p[3]),
    ]

    def g(p):
        return dict(
            mean=p,
            outer=np.outer(p, p),
            count=[
                hist[0].count(p[0]),
                hist[1].count(p[1]),
                hist[2].count(p[2]),
                hist[3].count(p[3]),
            ],
        )

    # evaluate expectation value of g(p)
    results = expval(g, neval=neval, nitn=nitn, adapt=False)

    # analyze results
    print('\nIterations:')
    print(results.summary())
    print('Integration Results:')
    pmean = results['mean']
    pcov = results['outer'] - np.outer(pmean, pmean)
    print('    mean(p) =', pmean)
    print('    cov(p) =\n', pcov)

    # create GVars from results
    p = gv.gvar(gv.mean(pmean), gv.mean(pcov))
    print('\nBayesian Parameters:')
    print(gv.tabulate(p))

    # show histograms
    print('\nHistogram Statistics:')
    count = results['count']
    for i in range(4):
        # print histogram statistics
        print('p[{}]:'.format(i))
        print(hist[i].analyze(count[i]).stats)
Example #52
0
    gp.addtransf({key + 'short': 0.3, key + 'long': 1}, key)


addcomps('data', time)
addcomps('pred', time_pred)

print('generate data...')
prior = gp.prior(['data', 'datashort', 'datalong'])
data = next(gvar.raniter(prior))

print('prediction...')
pred = gp.predfromdata({'data': data['data']},
                       ['pred', 'predshort', 'predlong'])

print('sample posterior...')
mean = gvar.mean(pred)
sdev = gvar.sdev(pred)
samples = list(gvar.raniter(pred, 1))

print('figure...')
fig, axs = plt.subplots(3, 1, num='w', clear=True, figsize=[6, 7])

for ax, comp in zip(axs, ['', 'short', 'long']):
    key = 'pred' + comp

    m = mean[key]
    s = sdev[key]
    ax.fill_between(time_pred, m - s, m + s, alpha=0.3, color='b')

    for sample in samples:
        ax.plot(time_pred, sample[key], alpha=0.2, color='b')
Example #53
0
def compute_diagonal((dset,key)):
 print "diagonal key ",key
 tdat = compute_correlation_pair(dset,key,key)
 return (key,gv.mean(tdat[key]),gv.sdev(tdat[key]),gv.evalcorr(tdat)[key,key])
Example #54
0
def do_fit(svdcut=None, do_plot=False):
    if svdcut is None:
        svdcut = lsqfit.nonlinear_fit.set()['svdcut']
        sys.stdout = tee.tee(sys_stdout, open('eg5a.out', 'w'))
        default_svd = True
    else:
        default_svd = False
    x, y = make_data()
    prior = make_prior(
        100)  # 100 exponential terms in all (10 gives the same result)
    p0 = None
    for nexp in range(1, 6):
        # marginalize the last 100 - nexp terms
        fit_prior = gv.BufferDict()  # part of prior used in fit
        ymod_prior = gv.BufferDict()  # part of prior absorbed in ymod
        for k in prior:
            fit_prior[k] = prior[k][:nexp]
            ymod_prior[k] = prior[k][nexp:]
        ymod = y - fcn(x, ymod_prior)
        # fit modified data with just nexp terms
        fit = lsqfit.nonlinear_fit(data=(x, ymod),
                                   prior=fit_prior,
                                   fcn=fcn,
                                   p0=p0,
                                   tol=1e-10,
                                   svdcut=svdcut)
        if not default_svd and nexp == 5:
            sys.stdout = tee.tee(sys_stdout, open('eg5b.out', 'w'))
        print '************************************* nexp =', nexp
        print fit.format(True)
        p0 = fit.pmean
        if do_plot:
            import matplotlib.pyplot as plt
            if nexp > 4:
                continue
            plt.subplot(2, 2, nexp)
            if nexp not in [1, 3]:
                plt.yticks([0.05, 0.10, 0.15, 0.20, 0.25], [])
            else:
                plt.ylabel('y')
            if nexp not in [3, 4]:
                plt.xticks([1.0, 1.5, 2.0, 2.5], [])
            else:
                plt.xlabel('x')
            plt.errorbar(x=x, y=gv.mean(ymod), yerr=gv.sdev(ymod), fmt='bo')
            plt.plot(x, y, '-r')
            plt.plot(x, fcn(x, fit.pmean), ':k')
            plt.text(1.75, 0.22, 'nexp = {}'.format(nexp))
            if nexp == 4:
                plt.savefig('eg5.png', bbox_inches='tight')
                plt.show()
    # print summary information and error budget
    E = fit.p['E']  # best-fit parameters
    a = fit.p['a']
    outputs = {
        'E1/E0': E[1] / E[0],
        'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0],
        'a2/a0': a[2] / a[0]
    }
    inputs = {
        'E prior': prior['E'],
        'a prior': prior['a'],
        'svd cut': fit.svdcorrection,
    }
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
    sys.stdout = sys_stdout
Example #55
0
def main():
    x, y = make_data()  # make fit data
    # y = gv.gvar(gv.mean(y), 0.75**2 * gv.evalcov(y))
    p0 = None  # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(1, 7):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean  # starting point for next fit (opt.)
        print '************************************* nexp =', nexp
        print fit.format()  # print the fit results
        E = fit.p['E']  # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print 'E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0]
            print 'a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0]
            print

    # error budget
    outputs = {
        'E1/E0': E[1] / E[0],
        'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0],
        'a2/a0': a[2] / a[0]
    }
    inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
    inputs = collections.OrderedDict()
    inputs['a'] = fit.prior['a']
    inputs['E'] = fit.prior['E']
    inputs['y'] = fit.data[1]
    print '================= Error Budget Analysis'
    print fit.fmt_values(outputs)
    print fit.fmt_errorbudget(outputs, inputs)

    sys.stdout = sys_stdout
    # print(gv.gvar(str(a[1])) / gv.gvar(str(a[0])) )
    # print(gv.evalcorr([fit.p['a'][1], fit.p['E'][1]]))
    # print(fit.format(True))

    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=fit.pmean)
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    print '--------------------- original fit'
    print fit.format()
    E = fit.p['E']  # best-fit parameters
    a = fit.p['a']
    print 'E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0]
    print 'a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0]
    print
    # extra data 1
    print '\n--------------------- new fit to extra information'

    def ratio(p):
        return p['a'][1] / p['a'][0]

    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5),
                                  fcn=ratio,
                                  prior=fit.p)
    print(newfit.format())
    E = newfit.p['E']
    a = newfit.p['a']
    print 'E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0]
    print 'a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0]

    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x, fit.pmean)
        plt.xlim(4, 15)
        plt.ylim(0.95, 1.05)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.yticks([0.96, 0.98, 1.00, 1.02, 1.04],
                   ['0.96', '0.98', '1.00', '1.02', '1.04'])
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()

    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print 'chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof)
    print 'E:', new_p['E'][:4]
    print 'a:', new_p['a'][:4]
    print 'a1/a0:', new_p['a1/a0']

    if DO_BAYES:
        # Bayesian Fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x, y),
                                   fcn=f,
                                   prior=prior,
                                   p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        # print fit

        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=40000, nitn=10)

        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])

        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
            )

        r = expval(g, neval=40000, nitn=10, adapt=False)

        # print results
        print r.summary()
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print 'Results from Bayesian Integration:'
        print 'a0: mean =', means[0], '  sdev =', cov[0, 0]**0.5
        print 'E0: mean =', means[1], '  sdev =', cov[1, 1]**0.5
        print 'covariance from Bayesian integral =', np.array2string(
            cov, prefix=36 * ' ')
        print

        print 'Results from Least-Squares Fit:'
        print 'a0: mean =', fit.p['a'][0].mean, '  sdev =', fit.p['a'][0].sdev
        print 'E0: mean =', fit.p['E'][0].mean, '  sdev =', fit.p['E'][0].sdev
        print 'covariance from least-squares fit =', np.array2string(
            gv.evalcov([fit.p['a'][0], fit.p['E'][0]]),
            prefix=36 * ' ',
            precision=3)
        sys.stdout = sys_stdout

        # make histogram of E[0] probabilty
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()

    if DO_BOOTSTRAP:
        Nbs = 40  # number of bootstrap copies

        outputs = {
            'E1/E0': [],
            'E2/E0': [],
            'a1/a0': [],
            'a2/a0': [],
            'E1': [],
            'a1': []
        }  # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']  # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])  # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print 'Bootstrap results:'
        print 'E1/E0 =', outputs['E1/E0'], '  E2/E1 =', outputs['E2/E0']
        print 'a1/a0 =', outputs['a1/a0'], '  a2/a0 =', outputs['a2/a0']
        print 'E1 =', outputs['E1'], '  a1 =', outputs['a1']
Example #56
0
import lsqfitgp as lgp
from matplotlib import pyplot as plt
import numpy as np
import gvar

xdata = np.linspace(0, 10, 10)
xpred = np.linspace(-15, 25, 300)
y = np.sin(xdata)
yerr = 0.1

gp = lgp.GP(lgp.ExpQuad(scale=3))
gp.addx(xdata, 'pere')
gp.addx(xpred, 'banane')

uy = gvar.gvar(y + yerr * np.random.randn(len(y)), yerr * np.ones_like(y))
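# keepcorr=False: the prediction GVars carry no correlation with the data
# GVars, as the assert below checks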
u = gp.predfromdata({'pere': uy}, 'banane', keepcorr=False)
assert gvar.cov(u[0], uy[0]) == 0
m = gvar.mean(u)
s = gvar.sdev(u)
cov = gvar.evalcov(u)

fig, ax = plt.subplots(num='i', clear=True)

patch = ax.fill_between(xpred, m - s, m + s, label='pred', alpha=0.5)
color = patch.get_facecolor()[0]
simulated_lines = np.random.multivariate_normal(m, cov, size=10)
ax.plot(xpred, simulated_lines.T, '-', color=color)
ax.errorbar(xdata, gvar.mean(uy), yerr=gvar.sdev(uy), fmt='k.', label='data')
ax.legend(loc='best')

fig.show()
Example #57
0
def plot_corr_normalized(models,data,fit,**kwargs):
 """
 Get all data ready so that it can be plotted on command
 Allows for dynamic cycling through plots
 """
 _fnNMod = len(models)
 _fnIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _fnDatCentral = []
 _fnDatError   = []
 _fnFitOnes    = []
 _fnFitError   = []
 #
 ## -- other objects
 _fnTDataNonZero = []
 _fnTFitNonZero  = []
 _fnTData        = []
 _fnTFit         = []
 _fnTRem         = [] # number of previous timeslices removed
 fig,ax = plt.subplots(1)
 #
 ## -- setup plot function
 def do_plot_normalized(idx,fig=fig):
   fig.clear()
   ax = fig.add_subplot(111)
   key = models[idx[0]].datatag

   ax.set_xlim([-1,len(_fnTData[idx[0]])])
   ax.set_ylim(utp.get_option("y_limit",[0.2,1.8],**kwargs[key]))
   #
   ## -- plot fit
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitOnes[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   ax.errorbar(_fnTDataNonZero[idx[0]],_fnDatCentral[idx[0]],yerr=_fnDatError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   ax.scatter(_fnTFitNonZero[idx[0]],
    [ _fnDatCentral[idx[0]][t] for t in
    list(np.array(_fnTFitNonZero[idx[0]])-np.array(_fnTRem[idx[0]])) ],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitlefn",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   ## -- modify some options 
   ax.set_xlabel(r'$t$')
   ax.set_ylabel(utp.get_option("yaxistitle",r"$C(t)/C_{fit}(t)$",**kwargs[key]))
   for item in ([ax.xaxis.label,ax.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("fn_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("fn_save_name","fnplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_normalized(event,idx=_fnIdx):
   #print('press_normalized', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("fn_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("fn_save_name","fnplot-"+key+".png",**kwargs[key])
         do_plot_normalized([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_normalized(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_normalized)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   _fnTData.append(model.tdata)
   _fnTFit.append(model.tfit)
   _fnTFit[-1] = np.append(_fnTFit[-1],list(sorted([len(_fnTData[-1]) - t for t in _fnTFit[-1]])))
   ## -- fit
   _fnFitFunc = utp.create_fit_func(model,fit)
   _fnFitMean = gv.mean(_fnFitFunc(_fnTData[-1]))
   _fnTDataNonZero.append([t for t in _fnTData[-1] if np.abs(_fnFitMean[t]) > 1e-20])
   _fnTFitNonZero.append([t for t in _fnTFit[-1] if np.abs(_fnFitMean[t]) > 1e-20])
   _fnTRem.append([(0 if np.abs(_fnFitMean[t]) > 1e-20 else 1) for t in model.tdata])
   _fnTRem[-1] = \
     [sum(_fnTRem[-1][:i+1]) for i in range(len(_fnTRem[-1])) if i in _fnTFitNonZero[-1]]
   _fnFitMean = gv.mean(_fnFitFunc(_fnTDataNonZero[-1]))
   _fnFitSdev = list(np.array(gv.sdev(_fnFitFunc(_fnTDataNonZero[-1])))/np.array(_fnFitMean))
   _fnFitOnes.append(list(np.ones(len(_fnTDataNonZero[-1]))))
   _fnFitError.append([ list(np.array(_fnFitOnes[-1])-np.array(_fnFitSdev)),
     list(np.array(_fnFitOnes[-1])+np.array(_fnFitSdev)) ])
   ## -- data
   _fnDatCentral.append( list(np.array([gv.mean(data[key])[t] for t in _fnTDataNonZero[-1]])/
     np.array(_fnFitMean)) )
   _fnDatSdev = ( np.array([gv.sdev(data[key])[t] for t in _fnTDataNonZero[-1]])/
     np.array(_fnFitMean) )
   _fnDatError.append([ list(_fnDatSdev), list(_fnDatSdev) ])
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_normalized([ix])
 else:
  do_plot_normalized(_fnIdx)
Example #58
0
def n2s(val):
    """
    Computes the noise-to-signal ratio.
    """
    return gv.sdev(val) / gv.mean(val)
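
# Illustrative usage (made-up value): gv.gvar(2.0, 0.1) has
# noise-to-signal 0.1 / 2.0 = 0.05.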
Example #59
0
def main():
    gv.ranseed([2009, 2010, 2011, 2012])  # initialize random numbers (opt.)
    x, y = make_data()  # make fit data
    p0 = None  # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 8):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=1e-15)  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.0:
            p0 = fit.pmean  # starting point for next fit (opt.)
        if nexp == 5:
            sys.stdout = tee.tee(sys_stdout, open("eg3.out", "w"))
        print "************************************* nexp =", nexp
        print fit  # print the fit results
        E = fit.p["E"]  # best-fit parameters
        a = fit.p["a"]
        print "E1/E0 =", E[1] / E[0], "  E2/E0 =", E[2] / E[0]
        print "a1/a0 =", a[1] / a[0], "  a2/a0 =", a[2] / a[0]
        # print E[1]-E[0], E[-1]-E[-2]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(fit.y)
        sys.stdout = sys_stdout
        print
    # sys.stdout = tee.tee(sys_stdout, open("eg3a.out", "w"))
    # for i in range(1):
    #     print '--------------------- fit with %d extra data sets' % (i+1)
    #     x, y = make_data(1)
    #     prior = fit.p
    #     fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f1,prior=prior, svdcut=SVDCUT)
    #     print fit
    sys.stdout = sys_stdout

    if DO_BOOTSTRAP:
        Nbs = 10  # number of bootstrap copies
        outputs = {"E1/E0": [], "E2/E0": [], "a1/a0": [], "a2/a0": [], "E1": [], "a1": []}  # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean["E"]  # best-fit parameters
            a = bsfit.pmean["a"]
            outputs["E1/E0"].append(E[1] / E[0])  # accumulate results
            outputs["E2/E0"].append(E[2] / E[0])
            outputs["a1/a0"].append(a[1] / a[0])
            outputs["a2/a0"].append(a[2] / a[0])
            outputs["E1"].append(E[1])
            outputs["a1"].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print "Bootstrap results:"
        print "E1/E0 =", outputs["E1/E0"], "  E2/E1 =", outputs["E2/E0"]
        print "a1/a0 =", outputs["a1/a0"], "  a2/a0 =", outputs["a2/a0"]
        print "E1 =", outputs["E1"], "  a1 =", outputs["a1"]

    if DO_PLOT:
        print fit.format(100)  # print the fit results
        import pylab as pp
        from gvar import mean, sdev

        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel("x")
        pp.ylabel("y/f(x,p)")
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt="ob")
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
Example #60
0
    d2.append(delta_beta(i, model_par_2))
   
   
t1 = []
t2 = []
delta = np.arange(0,99)
delta = delta*0.01

for i in delta:
    t1.append(transition(i, model_par_1))
    t2.append(transition(i, model_par_2))
    

fig, ax = plt.subplots(figsize=(7,5))

ax.fill_between(density, gv.mean(d1) + gv.sdev(d1), gv.mean(d1) - gv.sdev(d1), label=r'$\delta^2$ only', color='red', alpha=0.4)
ax.fill_between(density, gv.mean(d2) + gv.sdev(d2), gv.mean(d2) - gv.sdev(d2), label=r'$\delta^2 + \delta^4$', color='blue', alpha=0.4)

ax.fill_betweenx(delta, gv.mean(t1) + gv.sdev(t1), gv.mean(t1) - gv.sdev(t1), color='red', alpha=0.4)
ax.fill_betweenx(delta, gv.mean(t2) + gv.sdev(t2), gv.mean(t2) - gv.sdev(t2), color='blue', alpha=0.4)

plt.legend()
plt.xlabel(r'$n$ (fm$^{-3}$)', fontsize='15')
plt.ylabel(r'$ \delta$', fontsize='15')
plt.text(0.12, 0.85, r'$\beta$-equilibrium ',fontsize='14')
plt.text(0.109, 0.314, r'spinodal',fontsize='14')
ax.tick_params(labelsize='14')
ax.tick_params(right=True)
ax.tick_params(top=True)
ax.tick_params(direction='in')
ax.legend(loc = 'lower left',fontsize='13.0')