Example #1
    def form_factor_fixed_qsq(self, form='f+', qsq=0.0,
              var='qsq', decay='Bs2Ds', withpole=False, nz=3, gvar=True):
        """
        Construct the f+ or f0 form factors at a fixed q^2 value.
        """
        if form == 'f0':
            PHI = const_b2d.PHI0
        elif form == 'f+':
            PHI = const_b2d.PHI_PLUS
        else:
            print("Only f+ and f0 form factors are provided.")
            return

        z = self.q_sq2z(qsq)
        formfactor = self.fcn(z, self.params(decay), nz)[form] / self.Pphi(qsq, form)
        formfactor /= PHI
        if var == 'qsq':
            if gvar:
                res = [qsq, formfactor]
            else:
                res = [qsq, gv.mean(formfactor), gv.sdev(formfactor)]
        elif var == 'z':
            if gvar:
                res = [z, formfactor]
            else:
                res = [z, gv.mean(formfactor), gv.sdev(formfactor)]
        return res
Example #2
def errreturn(val):
    if isinstance(val, vegas._vegas.RAvg):
        error = gvar.sdev(val)
    else:
        valp = val[0]
        error = gvar.sdev(valp)
    return error
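
A minimal usage sketch for errreturn (the probe value is invented; the vegas branch behaves the same way when val is a vegas result object):

import gvar

# non-vegas input: the error of the first element is returned
print(errreturn([gvar.gvar(1.0, 0.25)]))   # -> 0.25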
Example #3
    def form_factor_fixed_qsq(self, form='f+', qsq=0.0,
              var='qsq', withpole=True, nz=4, gvar=True):
        """
        Construct the f+ or f0 form factors at a fixed q^2 value.
        """
        if form not in ('f+', 'f0'):
            print "Only f+ and f0 form factors are provided."
            return

        z = self.q_sq2z(qsq)
        if withpole:
            formfactor = self.fcn(z, self.params())[form] / self.Pphi(qsq, form)
        else:
            formfactor = self.fcn(z, self.params())[form]
        if var == 'qsq':
            if gvar:
                res = [qsq, formfactor]
            else:
                res = [qsq, gv.mean(formfactor), gv.sdev(formfactor)]
        elif var == 'z':
            if gvar:
                res = [z, formfactor]
            else:
                res = [z, gv.mean(formfactor), gv.sdev(formfactor)]
        return res
Example #4
def plot_corre_and_fit(options, num, labels, data, fcnlabel, fcn, choosefit):
    xlow = options.xlow
    xhigh = options.xhigh
    ylow = options.ylow
    yhigh = options.yhigh
    tkxy = options.tkxy
    xmax = options.rmax[num]
    choose_t = options.choose[num]
    outdir = options.outdir

    t = np.arange(NT)
    x = {"tc": t, "tg": t, "tgc": t}
    fcnout = -(fcn(x, choosefit.p)[fcnlabel])

    upper = gv.mean(fcnout) + gv.sdev(fcnout)
    lower = gv.mean(fcnout) - gv.sdev(fcnout)

    idata = data.copy()
    jacksamp = jacksample(idata, 0)
    _, jkerr = jackerror(jacksamp, 0)  # * at_inverse
    exmean = -(np.average(idata, axis=0))

    fig, ax = plt.subplots()
    ax.errorbar(
        t,
        exmean,
        jkerr,
        fmt="o",
        color="blue",
        markersize=5,
        capsize=4,
        linewidth=1.0,
        capthick=1.0,
        markeredgecolor="black",
        fillstyle="full",
        markerfacecolor="w",
    )

    ax.fill_between(t, lower, upper, alpha=0.5, color="dimgray")
    ax.fill_between(
        np.arange(choose_t, xmax + 1),
        lower[choose_t:xmax + 1],
        upper[choose_t:xmax + 1],
        alpha=1.0,
        color="royalblue",
    )

    ax.xaxis.set_major_locator(MultipleLocator(tkxy[0]))
    ax.xaxis.set_minor_locator(MultipleLocator(tkxy[1]))
    # ax.yaxis.set_major_locator(MultipleLocator(tkxy[2]))
    # ax.yaxis.set_minor_locator(MultipleLocator(tkxy[3]))

    ax.set_xlabel(options.xlabel, fontsize=24)
    ax.set_ylabel(labels, fontsize=24)
    ax.set_xlim(xlow[num], xhigh[num])
    ax.set_ylim(ylow[num], yhigh[num])

    plt.tight_layout()
    plt.savefig(outdir + "fit_" + fcnlabel + ".pdf")
    plt.show()
Example #5
def errorbar(ax, x, y, bands=False, **kwargs):
    """Wrapper to plot gvars using the matplotlib function errorbar."""
    if hasattr(y, 'values'):
        y = y.values
    if hasattr(x, 'values'):
        x = x.values
    try:
        xerr = gv.sdev(x)
    # sdev throws error w/ x = np.arange(>50)
    except ZeroDivisionError:
        xerr = 0. * np.array(x)  # in case x passed from plot().
    x = gv.mean(x)
    try:
        yerr = gv.sdev(y)
    except ZeroDivisionError:
        yerr = 0. * y
    y = gv.mean(y)
    if bands:
        ax.errorbar(x=x, y=y, **kwargs)
        facecolor = kwargs.get('color', ax.lines[-1].get_color())
        alpha = kwargs.get('alpha', 1.0)
        ax.fill_between(x,
                        y - yerr,
                        y + yerr,
                        facecolor=facecolor,
                        alpha=alpha)
    else:
        ax.errorbar(x=x, xerr=xerr, y=y, yerr=yerr, **kwargs)
    return ax
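
A short usage sketch for the wrapper above (the data values are invented):

import numpy as np
import gvar as gv
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.arange(5)                                   # plain x -> zero x errors
y = gv.gvar([1.0, 2.0, 3.0, 2.0, 1.0], [0.1, 0.2, 0.1, 0.2, 0.1])
errorbar(ax, x, y, fmt='o')                        # points with error bars
errorbar(ax, x, y, bands=True, alpha=0.3)          # same data as an error band
plt.show()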
Example #6
def plot_fit(plt, p, data,  *args, **kargs):
    if not ONE_W or not SHOW_PLOT:
        return plt
    xline = np.linspace(data.x[0], data.x[-1], 100)
    yline = data.fitfcn(p, x=xline)
    yp = gv.mean(yline) + gv.sdev(yline)
    ym = gv.mean(yline) - gv.sdev(yline)
    if args[0][0] == 'k':
        plt.plot(xline, gv.mean(yline), *args)
    else:
        plt.fill_between(xline,yp,ym, **kargs)
Example #7
 def test_svd(self):
     " EigenBasis.svd "
     tdata = [1, 2, 3, 4]
     G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
     basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
     Gsvd = basis.svd(G, svdcut=0.9)
     self.assertEqual(basis.svdn, 15)
     self.assertEqual(str(basis.svdcorrection), "0.000(30)")
     for k in G:
         np.testing.assert_allclose(gv.mean(G[k]), gv.mean(Gsvd[k]))
         self.assertTrue(np.all(gv.sdev(Gsvd[k]) > gv.sdev(G[k])))
Example #8
def fast_w2pix(w, axis_min, axis_step):
    """Fast conversion of wavelength/wavenumber to pixel

    :param w: wavelength/wavenumber
    
    :param axis_min: min axis wavelength/wavenumber
    
    :param axis_step: axis step size in wavelength/wavenumber
    """
    w_ = (w - axis_min)
    if np.any(gvar.sdev(w_) != 0.):
        w_ = gvar.gvar(gvar.mean(w_), gvar.sdev(w_))
    return gvar.fabs(w_) / axis_step
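
A quick sketch of a call (the wavelength and axis parameters are invented):

import gvar

w = gvar.gvar(650.0, 0.2)                             # wavelength with an uncertainty
print(fast_w2pix(w, axis_min=400.0, axis_step=0.5))   # pixel position as a gvar, ~500.00(40)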
Example #9
def f_fit_plot(x,
               y,
               all_dta,
               fit,
               error_band=False,
               semilog=False,
               full_data=True):
    '''
    Function for plotting data with the fit lines and error bands.
    For correlators, using a semi-log plot.
    full_data=True, plots the entire data and the best-fit in the fit region
    '''

    plt.figure()
    # Plots data points
    if full_data:  # Plot all the correlators even those not used in the fit.
        plt.errorbar(x=all_dta['x'],
                     y=gv.mean(all_dta['y']),
                     yerr=gv.sdev(all_dta['y']),
                     color='black',
                     linestyle='None',
                     marker='o')
    else:  # Plot the data points used in the fit
        plt.errorbar(x=x['x'],
                     y=gv.mean(y['y']),
                     yerr=gv.sdev(y['y']),
                     linestyle='None',
                     color='red',
                     marker='s')

    # Plot the best fit line
    # # Using a finer grid to get a continuous curve.
    curvex = dict(x)
    curvex['x'] = np.linspace(min(fit.x['x']), max(fit.x['x']), 500)
    curvey = gv.mean(fit.fcn(curvex, fit.p))
    plt.plot(curvex['x'], curvey, color='blue')

    if error_band:
        obs_fit = gv.mean(fit.fcn(curvex, fit.p))
        err_fit = gv.sdev(fit.fcn(curvex, fit.p))
        sigma = 2.0
        plt.fill_between(curvex['x'],
                         obs_fit - sigma * err_fit,
                         obs_fit + sigma * err_fit,
                         color='yellow')  # providing an error band.

    if semilog: plt.semilogy()

    plt.title("Plot")
Example #10
def make_plot(x, y, fit, ylabel='y(x)', xmax=1.0):
	if not MAKE_PLOTS:
		return
	plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='bo')
	x = np.arange(0., xmax, 0.01)
	yfit = f(x, fit.p)
	plt.plot(x, gv.mean(yfit), 'k--')
	yplus = gv.mean(yfit) + gv.sdev(yfit)
	yminus = gv.mean(yfit) - gv.sdev(yfit)
	plt.fill_between(x, yminus, yplus, color='0.8')
	plt.xlim(0,1)
	plt.ylim(0.3,1.9)
	plt.xlabel('x')
	plt.ylabel(ylabel)
	plt.show()
Example #11
    def plot_error_ellipsis(self, x_key, y_key, observable):
        x = self._get_posterior(x_key)[observable]
        y = self._get_posterior(y_key)[observable]

        fig, ax = plt.subplots()

        corr = '{0:.3g}'.format(gv.evalcorr([x, y])[0, 1])
        std_x = '{0:.3g}'.format(gv.sdev(x))
        std_y = '{0:.3g}'.format(gv.sdev(y))
        text = ('$R_{x, y}=$ %s\n $\\sigma_x =$ %s\n $\\sigma_y =$ %s' %
                (corr, std_x, std_y))

        # these are matplotlib.patch.Patch properties
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)

        # place a text box in upper left in axes coords
        ax.text(0.05,
                0.95,
                text,
                transform=ax.transAxes,
                fontsize=14,
                verticalalignment='top',
                bbox=props)

        C = gv.evalcov([x, y])
        eVe, eVa = np.linalg.eig(C)
        for e, v in zip(eVe, eVa.T):
            plt.plot([
                gv.mean(x) - 1 * np.sqrt(e) * v[0],
                1 * np.sqrt(e) * v[0] + gv.mean(x)
            ], [
                gv.mean(y) - 1 * np.sqrt(e) * v[1],
                1 * np.sqrt(e) * v[1] + gv.mean(y)
            ],
                     'k-',
                     lw=2)

        #plt.scatter(x-np.mean(x), y-np.mean(y), rasterized=True, marker=".", alpha=100.0/self.bs_N)
        #plt.scatter(x, y, rasterized=True, marker=".", alpha=100.0/self.bs_N)

        plt.grid()
        plt.gca().set_aspect('equal', adjustable='datalim')
        plt.xlabel(x_key.replace('_', r'\_'), fontsize=24)
        plt.ylabel(y_key.replace('_', r'\_'), fontsize=24)

        fig = plt.gcf()
        plt.close()
        return fig
Example #12
 def bootstrap(self, a=0.45):
     """
     """
     omega = _twopi
     phi = 0
     offset = 0.50
     
     nbootstrap = 500
     amp = []
     for ii in range(nbootstrap):
         mean, cov = self.test_qinfer_estimate(a, omega, phi, offset)
         amp.append(gvar.gvar(mean[0], np.sqrt(np.diag(cov))[0]))
         
     a_weighted = np.mean(gvar.mean(amp))
     print('fractional deviation of mean a = {}'.format((a_weighted - a) / a))
         
     plt.figure(figsize=(8, 4))
     plt.subplot(1, 2, 1)
     plt.hist((gvar.mean(amp) - a) / a)
     plt.xlabel('ratio of a_fit / a - 1')
     plt.ylabel('frequency')
     
     plt.subplot(1, 2, 2)
     plt.hist(gvar.sdev(amp))
     plt.xlabel('standard error of a')
     plt.ylabel('frequency')
     
     plt.tight_layout()
     
     plt.show()
Example #13
def main():
    print(
        gv.ranseed(
            (2050203335594632366, 8881439510219835677, 2605204918634240925)))

    log_stdout('eg3a.out')
    integ = vegas.Integrator(4 * [[0, 1]])

    # adapt grid
    training = integ(f(), nitn=10, neval=1000)

    # evaluate multi-integrands
    result = integ(f(), nitn=10, neval=5000)
    print('I[0] =', result[0], '  I[1] =', result[1], '  I[2] =', result[2])
    print('Q = %.2f\n' % result.Q)
    print('<x> =', result[1] / result[0])
    print('sigma_x**2 = <x**2> - <x>**2 =',
          result[2] / result[0] - (result[1] / result[0])**2)
    print('\ncorrelation matrix:\n', gv.evalcorr(result))
    unlog_stdout()
    r = gv.gvar(gv.mean(result), gv.sdev(result))
    print(r[1] / r[0])
    print((r[1] / r[0]).sdev / (result[1] / result[0]).sdev)
    print(r[2] / r[0] - (r[1] / r[0])**2)
    print(result.summary())
Example #14
    def diff_decay_rate_bk(self, lepton='mu', start=const.MMU**2,
                        end=const.TMINUS, num_points=500):
        """
        Calculate the differential decay rate for B -> K mu nu or B -> K tau
        nu.
        """
        if lepton == 'mu':
            ml = const.MMU
        elif lepton == 'tau':
            ml = const.MTAU
        else:
            print "Only the 'mu' or 'tau' lepton is allowed."
            return
        start = ml**2

        res = []
        step = (end - start) / num_points
        for qsq in np.arange(start, end + step, step):
            i = self.q_sq2z(qsq)
            E = self.q_sq2E(qsq)
            PK = self.q_sq2PK(qsq)
            fac_overall = const.GF**2 / (24*np.pi**3*const.MBS**2) * (1 - ml**2 / qsq)**2 * PK
            fac_plus = (1 + ml**2/2./qsq) * const.MBS**2 * PK**2
            fac_zero =  3.0 * ml**2 / 8.0 / qsq * (const.MBS**2 - const.MK**2)**2
            form = 'f+'
            fp = self.fcn(i, self.params())[form] / self.Pphi(qsq, form)
            form = 'f0'
            f0 = self.fcn(i, self.params())[form] / self.Pphi(qsq, form)
            #print qsq,gv.mean(ans), gv.sdev(ans)
            ans = fac_overall * ( fac_plus * fp**2 + fac_zero * f0**2 ) / const.GEV_TO_PS
            #print i, qsq, E, PK, fac_overall, fac_plus, fac_zero, fp, f0, ans
            res.append([qsq, gv.mean(ans), gv.sdev(ans)])
        return res
Example #15
    def __init__(self, nstates, ds=None, a_fm=None, **kwargs):
        prior = {}
        # Decaying states
        prior['light-light:dE'] = PhysicalSplittings('pion')(nstates.n, a_fm)
        prior['light-light:a'] = decay_amplitudes(nstates.n)
        prior['heavy-light:dE'] = PhysicalSplittings('d')(nstates.m, a_fm)
        prior['heavy-light:a'] = decay_amplitudes(nstates.m)
        # Oscillating states
        if nstates.no:
            prior['light-light:dEo'] = PhysicalSplittings('pion_osc')(
                nstates.no, a_fm)
            prior['light-light:ao'] = osc_amplitudes(nstates.no)
        if nstates.mo:
            prior['heavy-light:dEo'] = PhysicalSplittings('d_osc')(nstates.mo,
                                                                   a_fm)
            prior['heavy-light:ao'] = osc_amplitudes(nstates.mo)

        # Matrix elements Vnn
        for key, value in vmatrix(nstates).items():
            if value.size:  # skip empty matrices
                prior[key] = value

        # Make informed guesses for ground states and the form factor Vnn[0,0].
        # Estimate central values as well as possible, but keep wide priors.
        if ds is not None:
            for tag in ['light-light', 'heavy-light']:
                mean = gv.mean(ds[tag].mass)  # Central value from "meff"
                err = gv.sdev(prior[f"{tag}:dE"][0])
                prior[f"{tag}:dE"][0] = gv.gvar(mean, err)
            mean = gv.mean(ds.v_guess)  # Central value from ratio R
            err = 0.5 * mean
            prior['Vnn'][0, 0] = gv.gvar(mean, err)
        super(FormFactorPriorD2Pi, self).__init__(mapping=prior, **kwargs)
Example #16
    def fit_interpolation(self, simultaneous=None):
        if simultaneous is None:
            simultaneous = self._simultaneous

        if self._fit_interpolation is None or simultaneous != self._simultaneous:
            self._simultaneous = simultaneous
            #make_gvar = lambda g : gv.gvar(gv.mean(g), gv.sdev(g))
            #y_data = make_gvar(1 / self.fit_data['a/w'])

            make_gvar = lambda g: gv.gvar(gv.mean(g), gv.sdev(g))
            if self.observable == 'w0':
                data = {
                    self.model_info['name'] + '_interpolation':
                    1 / make_gvar(self.fit_data['a/w'])
                }
            elif self.observable == 't0':
                data = {
                    self.model_info['name'] + '_interpolation':
                    make_gvar(self.fit_data['t/a^2'])
                }

            if simultaneous:
                data[self.model_info['name']] = self.y

            models = self._make_models(interpolation=True,
                                       simultaneous=simultaneous)
            prior = self._make_prior(interpolation=True,
                                     simultaneous=simultaneous)

            fitter = lsqfit.MultiFitter(models=models)
            fit = fitter.lsqfit(data=data, prior=prior, fast=False, mopt=False)
            self._fit_interpolation = fit

        return self._fit_interpolation
Example #17
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = gv.gvar([ 
        '0.10(1)', '0.20(1)', '0.30(1)', '0.40(1)',  '0.50(1)', 
        '0.60(1)',  '0.70(1)',  '0.80(1)',  '0.90(1)', '1.00(1)'
        ])
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)', 
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)', 
        '0.185(79)', '0.832(79)'
        ])

    for t_n, theta_n in zip(t, theta):
        print("{}  {:>10}".format(t_n.fmt(2), theta_n.fmt(3)))
    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = gv.gvar('40(20)')
    prior['theta(0)'] = gv.gvar('1.571(50)')
    prior['t'] = t

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=None):
        if t is None:
            t = p['t']
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    sys.stdout = tee.tee(STDOUT, open('case-pendulum.out', 'w'))
    print(fit.format(maxline=True))
    sys.stdout = STDOUT
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2*np.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (np.pi / 2.))
    
    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4,3))
        # start plot with data
        plt.errorbar(
            x=gv.mean(t), xerr=gv.sdev(t), y=gv.mean(theta), yerr=gv.sdev(theta),
            fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
Example #18
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = gv.gvar([
        '0.10(1)', '0.20(1)', '0.30(1)', '0.40(1)',  '0.50(1)',
        '0.60(1)',  '0.70(1)',  '0.80(1)',  '0.90(1)', '1.00(1)'
        ])
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])

    for t_n, theta_n in zip(t, theta):
        print("{}  {:>10}".format(t_n.fmt(2), theta_n.fmt(3)))
    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = gv.gvar('40(20)')
    prior['theta(0)'] = gv.gvar('1.571(50)')
    prior['t'] = t

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=None):
        if t is None:
            t = p['t']
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    sys.stdout = tee.tee(STDOUT, open('case-pendulum.out', 'w'))
    print(fit.format(maxline=True))
    sys.stdout = STDOUT
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2*np.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (np.pi / 2.))

    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4,3))
        # start plot with data
        plt.errorbar(
            x=gv.mean(t), xerr=gv.sdev(t), y=gv.mean(theta), yerr=gv.sdev(theta),
            fmt='b.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
Example #19
def plot_eff_mass_fit_SM(m_e_kf,f_SM_1, par_SM_1,f_SM_2, par_SM_2):
    
    x_plot = np.arange(1,22)
    x_plot = x_plot*0.01
    
    y_plot = []
    for h in range(6):
        y_plot.append(m_e_kf[:, h, 0])

    y_plot = gv.dataset.avg_data(y_plot, spread=True)

    fig = plt.figure()

    plt.errorbar(x_plot, gv.mean(y_plot), gv.sdev(y_plot), fmt='ok',
                 label='data (68% CL)')

    y_quad = 1/f_SM_2(x_plot, par_SM_2)
    y_lin = 1/f_SM_1(x_plot, par_SM_1)

    plt.fill_between(x_plot, gv.mean(y_quad) + gv.sdev(y_quad),
                     gv.mean(y_quad) - gv.sdev(y_quad),
                     label='quadratic fit (68% CL)', color='red', alpha=0.6)
    plt.fill_between(x_plot, gv.mean(y_quad) + 2*gv.sdev(y_quad),
                     gv.mean(y_quad) - 2*gv.sdev(y_quad),
                     label='quadratic fit (95% CL)', color='red', alpha=0.4)
    plt.fill_between(x_plot, gv.mean(y_lin) + gv.sdev(y_lin),
                     gv.mean(y_lin) - gv.sdev(y_lin),
                     label='linear fit (68% CL)', color='blue', alpha=0.6)
    plt.fill_between(x_plot, gv.mean(y_lin) + 2*gv.sdev(y_lin),
                     gv.mean(y_lin) - 2*gv.sdev(y_lin),
                     label='linear fit (95% CL)', color='blue', alpha=0.4)
    
    for h in range(6):
        plt.plot (x_plot, m_e_kf[:,h,0],color='k', alpha=0.6)     
      
    plt.text(0.212,0.6129001102725408-0.01,'H'+str(1)+'')
    plt.text(0.212,0.6249013706200439-0.008,'H'+str(2)+'')
    plt.text(0.212,0.6026532762896706-0.015,'H'+str(3)+'')
    plt.text(0.212,0.6341732874045016+0.008,'H'+str(4)+'')
    plt.text(0.212,0.6505783121773542+0.005,'H'+str(5)+'')
    plt.text(0.212,0.632353164008689-0.003,'H'+str(7)+'')
    
    plt.xlabel ("$n$ (fm$^{-3}$)",fontsize='15')
    plt.ylabel ("$m_n^{*}/m$",fontsize='15')
    plt.text (0.02,0.95,'Symmetric matter',fontsize='15')
    plt.xticks(np.arange(0, 0.2+0.01, 0.05),fontsize='14' )
    plt.yticks(fontsize='14' )
    plt.tick_params(right=True)
    plt.tick_params(top=True)
    plt.tick_params(direction='in')
    
    plt.legend(loc='upper right',fontsize='14')  
    plt.show()
Example #20
def main():
    gv.ranseed([2009,2010,2011,2012,2013]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3,6):
        prior = make_prior(nexp,x)
        fit = lsqfit.nonlinear_fit(data=y,fcn=f,prior=prior,p0=p0) # ,svdcut=SVDCUT)
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        fit.check_roundoff()
        if nexp == 4:
            sys.stdout = tee.tee(sys.stdout,open("eg2.out","w"))
        print('************************************* nexp =', nexp)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1]/E[0], '  E2/E0 =', E[2]/E[0])
        print('a1/a0 =', a[1]/a[0], '  a2/a0 =', a[2]/a[0])
        sys.stdout = sys_stdout
        print()

    #
    if DO_BOOTSTRAP:
        Nbs = 10                                     # number of bootstrap copies
        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]),np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], '  E2/E0 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], '  a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], '  a1 =', outputs['a1'])

    if DO_PLOT:
        print(fit.format(100))                   # print the fit results
        import pylab as pp
        from gvar import mean,sdev
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=gv.mean(x),y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #21
def main():
    sys_stdout = sys.stdout

    # version 1 - relative errors
    sys.stdout = tee.tee(sys_stdout, open("eg7a.out", "w"))

    # fit data and prior
    x = np.array([1., 2., 3., 4.])
    y = np.array([3.4422, 1.2929, 0.4798, 0.1725])
    prior = gv.gvar(['10(1)', '1.0(1)'])

    # fit function
    def fcn(x, p):
        return p[0] * gv.exp(-p[1] * x)

    # find optimal dy
    def fitargs(z):
        dy = y * z
        newy = gv.gvar(y, dy)
        return dict(data=(x, newy), fcn=fcn, prior=prior)

    fit, z = lsqfit.empbayes_fit(0.001, fitargs)
    print(fit.format(True))
    if MAKE_PLOT:
        ratio = fit.y / fcn(x, fit.pmean)
        plt.errorbar(x=fit.x, y=gv.mean(ratio), yerr=gv.sdev(ratio), c='b')
        plt.plot([0.5, 4.5], [1.0, 1.0], c='r')

    # version 2 - additive errors
    sys.stdout = tee.tee(sys_stdout, open("eg7b.out", "w"))

    def fitargs(z):
        dy = np.ones_like(y) * z
        newy = gv.gvar(y, dy)
        return dict(data=(x, newy), fcn=fcn, prior=prior)

    fit, z = lsqfit.empbayes_fit(0.001, fitargs)
    print(fit.format(True))

    if MAKE_PLOT:
        ratio = fit.y / fcn(x, fit.pmean)
        plt.errorbar(x=fit.x + 0.1,
                     y=gv.mean(ratio),
                     yerr=gv.sdev(ratio),
                     c='g')
        plt.show()
Example #22
 def diff_decay_rate_fixed_q_sq(self, ml, qsq):
     z = self.q_sq2z(qsq)
     fp = self.fcn(z, self.params())['f+'] / self.Pphi(qsq, 'f+')
     f0 = self.fcn(z, self.params())['f0'] / self.Pphi(qsq, 'f0')
     ans_GeV = self.factor_overall(ml, qsq) * (self.factor_fplus(ml, qsq) * fp**2 +
                                           self.factor_fzero(ml, qsq) * f0**2)
     ans_PS = ans_GeV / const.GEV_TO_PS
     return [qsq, gv.mean(ans_PS), gv.sdev(ans_PS)]
Example #23
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,8):
        print('************************************* nexp =',nexp)
        prior = make_prior(nexp)
        # eps = gv.gvar(1,1e-300)   # use svdcut to make it independent
        # prior['a'] *= eps
        # y *= eps
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior, 
                                   p0=p0,svdcut=SVDCUT)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print()
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    
    if DO_BOOTSTRAP:
        Nbs = 10                                     # number of bootstrap copies
            
        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.dataset.avg_data(outputs[k],bstrap=True).fmt(3)
                                 # gv.gvar(np.mean(outputs[k]),
                                 # np.std(outputs[k])).fmt(3)
        print('Bootstrap results:')
        print('E1/E0 =',outputs['E1/E0'],'  E2/E0 =',outputs['E2/E0'])
        print('a1/a0 =',outputs['a1/a0'],'  a2/a0 =',outputs['a2/a0'])
        print('E1 =',outputs['E1'],'  a1 =',outputs['a1'])
        
    if DO_PLOT:
        print(fit.format(100))                   # print the fit results
        import pylab as plt   
        ratio = y/f(x,fit.pmean)
        plt.xlim(0,21)
        plt.xlabel('x')
        plt.ylabel('y/f(x,p)')
        plt.errorbar(x=x,y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        plt.plot([0.0,21.0],[1.0,1.0])
        plt.show()
Example #24
def plot_eff_mass_fit_NM(m_e_kf,f_NM_1, par_NM_1,f_NM_2, par_NM_2):
    x_plot = np.arange(1,22)
    x_plot = x_plot*0.01
    
    y_plot = []
    for h in range(6):
        y_plot.append(m_e_kf[:, h, 1])

    y_plot = gv.dataset.avg_data(y_plot, spread=True)

    fig = plt.figure()

    plt.errorbar(x_plot, gv.mean(y_plot), gv.sdev(y_plot), fmt='ok',
                 label='data (68% CL)')

    y_quad = 1/f_NM_2(x_plot, par_NM_2)
    y_lin = 1/f_NM_1(x_plot, par_NM_1)

    plt.fill_between(x_plot, gv.mean(y_quad) + gv.sdev(y_quad),
                     gv.mean(y_quad) - gv.sdev(y_quad),
                     label='quadratic fit (68% CL)', color='red', alpha=0.6)
    plt.fill_between(x_plot, gv.mean(y_quad) + 2*gv.sdev(y_quad),
                     gv.mean(y_quad) - 2*gv.sdev(y_quad),
                     label='quadratic fit (95% CL)', color='red', alpha=0.4)
    plt.fill_between(x_plot, gv.mean(y_lin) + gv.sdev(y_lin),
                     gv.mean(y_lin) - gv.sdev(y_lin),
                     label='linear fit (68% CL)', color='blue', alpha=0.6)
    plt.fill_between(x_plot, gv.mean(y_lin) + 2*gv.sdev(y_lin),
                     gv.mean(y_lin) - 2*gv.sdev(y_lin),
                     label='linear fit (95% CL)', color='blue', alpha=0.4)
                                  
    for h in range(6):
        plt.plot (x_plot, m_e_kf[:,h,1],color='k', alpha=0.6)    
    
    plt.text(0.212,m_e_kf[20,0,1],'H'+str(1)+'')
    plt.text(0.212,m_e_kf[20,1,1]-0.005,'H'+str(2)+'')
    plt.text(0.212,m_e_kf[20,2,1],'H'+str(3)+'')
    plt.text(0.212,m_e_kf[20,3,1]-0.006,'H'+str(4)+'')
    plt.text(0.212,m_e_kf[20,4,1],'H'+str(5)+'')
    plt.text(0.212,m_e_kf[20,5,1],'H'+str(7)+'')
    
    #legend = plt.legend (loc='lower left')
    plt.xlabel ("$n$ (fm$^{-3}$)",fontsize='15')
    plt.ylabel ("$m_n^{*}/m$",fontsize='15')
    plt.text (0.06,1.03,'Neutron matter',fontsize='15')
    plt.xticks(np.arange(0, 0.2+0.01, 0.05),fontsize='14' )
    plt.yticks(fontsize='14' )
    plt.tick_params(right=True)
    plt.tick_params(top=True)
    plt.tick_params(direction='in')
    plt.show()
Example #25
def tabulate_avg(avgout, format=(6, 3)):
    """ Tabulates averages and standard deviations.
        
    tabulate_avg(...) creates a nicely formatted table displaying the
    output from functions like ``dataset.Dataset.gdev``. Here ``avgout`` is
    the output. Parameter ``format`` specifies the output format:
    ``format=(N,D)`` implies that format ``'%N.Df(%Dd)'`` is used to print
    ``avg,int(10**D * std_dev)``. The table is returned as a single string,
    for printing.
    """
    table = []
    output = sorted(avgout.items())
    for tag, avsd in output:
        try:
            av = avsd.mean
            sd = avsd.sdev
        except AttributeError:
            av = gvar.mean(avsd)
            sd = gvar.sdev(avsd)
        lines = ''
        line = '%15s' % str(tag)
        try:
            sdfac = 10**format[1]
            fmt = (' %' + str(format[0]) + '.' + str(format[1]) + 'f(%' +
                   str(format[1]) + 'd)')

            def avgfmt(av, sd, fmt=fmt, sdfac=sdfac):
                try:
                    return fmt % (av, int(sdfac * sd + 0.5))
                except:
                    return (' %g (%.4g)' % (av, sd))

            ##
        except:

            def avgfmt(av, sd):
                return (' %g (%.4g)' % (av, sd))

            ##
        na = len(av)
        if len(sd) < na:
            na = len(sd)
        if na >= 1:
            for i in range(na):
                if len(sd.shape) == 2:
                    sdi = math.sqrt(sd[i][i])
                else:
                    sdi = sd[i]
                nextfield = avgfmt(av[i], sdi)
                if (len(nextfield) + len(line)) > 78:
                    lines = lines + line + '\n'
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
            table.append(lines + line + '\n')
    return '\n'.join(table)
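
A sketch of a call, assuming avgout maps tags to arrays of gvars as the docstring describes (the numbers are invented):

import gvar

avgout = {'corr': gvar.gvar([0.5012, 0.2521], [0.0004, 0.0003])}
print(tabulate_avg(avgout, format=(8, 4)))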
Example #26
def make_plot(x, y, fit, ylabel='y(x)', xmax=1.0, name='appendix1'):
    global NPLT
    if not MAKE_PLOTS:
        return
    plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='bo')
    x = np.arange(0., xmax, 0.01)
    yfit = f(x, fit.p)
    plt.plot(x, gv.mean(yfit), 'k--')
    yplus = gv.mean(yfit) + gv.sdev(yfit)
    yminus = gv.mean(yfit) - gv.sdev(yfit)
    plt.fill_between(x, yminus, yplus, color='0.8')
    plt.xlim(0, 1)
    plt.ylim(0.3, 1.9)
    plt.xlabel('x')
    NPLT += 1
    plt.ylabel(ylabel)
    plt.savefig(name + '.png', bbox_inches='tight')
    plt.show()
Example #27
def _valid(arr):
    """Restricts to elements which are neither infinite nor nans."""
    mean = gv.mean(arr)
    sdev = gv.sdev(arr)
    mask = np.isfinite(mean)\
        & np.isfinite(sdev)\
        & ~np.isnan(mean)\
        & ~np.isnan(sdev)
    return arr[mask]
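
A small sketch of the filter with one bad entry injected by hand (the values are invented, and it is assumed that gv.gvar accepts a nan mean):

import numpy as np
import gvar as gv

arr = np.array([gv.gvar(1.0, 0.1), gv.gvar(np.nan, 0.1), gv.gvar(2.0, 0.2)])
print(_valid(arr))   # only the two finite entries survive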
Example #28
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,8):
        print('************************************* nexp =',nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0,svdcut=SVDCUT)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print()
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    
    if DO_ERRORBUDGET:
        outputs = OrderedDict([
            ('E1/E0', E[1]/E[0]), ('E2/E0', E[2]/E[0]),         
            ('a1/a0', a[1]/a[0]), ('a2/a0', a[2]/a[0])
            ])
        inputs = OrderedDict([
            ('E', fit.prior['E']), ('a', fit.prior['a']),
            ('y', y), ('svd', fit.svdcorrection)
            ])
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs,inputs))
        
    if DO_EMPBAYES:
        def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5,0.5*z[0]) for i in range(nexp)]
            return dict(prior=prior,data=data,fcn=f,p0=p0)
        ##
        z0 = [0.0]
        fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3)
        print(fit)                  # print the optimized fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =',(E[1]/E[0]).fmt(),'  E2/E0 =',(E[2]/E[0]).fmt())
        print('a1/a0 =',(a[1]/a[0]).fmt(),'  a2/a0 =',(a[2]/a[0]).fmt())
        print("prior['a'] =",fit.prior['a'][0].fmt())
        print()
    
    if DO_PLOT:
        import pylab as pp   
        from gvar import mean,sdev     
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #29
def _infer_tmax(ydata, noise_threshy):
    """Infer the maximum time with noise-to-signal below a threshold."""
    if noise_threshy is None:
        return len(ydata) - 1
    good = gv.sdev(ydata) / gv.mean(ydata) < noise_threshy
    if np.all(good):
        tmax = len(ydata) - 1
    else:
        tmax = np.argmin(good)
    return tmax
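
A toy illustration with an invented correlator whose last point is noisy; the function returns the index of the first point whose noise-to-signal ratio reaches the threshold:

import gvar as gv

ydata = gv.gvar([1.0, 0.5, 0.25, 0.12], [0.01, 0.01, 0.01, 0.05])
print(_infer_tmax(ydata, noise_threshy=0.1))   # -> 3, first index failing the cut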
Example #30
def inflate(params, frac):
    """
    Inflates the width on the priors to frac*mean, unless the existing width is
    already wider.
    """
    for key, value in params.items():
        mean = gv.mean(value)
        sdev = np.maximum(frac * np.abs(mean), gv.sdev(value))
        params[key] = gv.gvar(mean, sdev)
    return params
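
A small sketch of the width floor (the prior values are invented):

import gvar as gv

params = {'E0': gv.gvar(1.20, 0.01), 'a0': gv.gvar(0.05, 0.10)}
inflate(params, frac=0.2)
print(params['E0'])   # width raised to 0.2*1.20 = 0.24
print(params['a0'])   # 0.10 already exceeds 0.2*0.05, so unchanged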
Example #31
def show_plot(t_array, th_array):
    """ Display theta vs t plot. """
    th_mean = gv.mean(th_array) 
    th_sdev = gv.sdev(th_array)
    thp = th_mean + th_sdev
    thm = th_mean - th_sdev
    plt.fill_between(t_array, thp, thm, color='0.8')
    plt.plot(t_array, th_mean, linewidth=0.5)
    plt.xlabel('$t$')
    plt.ylabel(r'$\theta(t)$')
    plt.savefig('pendulum.pdf', bbox_inches='tight')
    plt.show()
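
A toy call with an invented cosine curve and constant errors:

import numpy as np
import gvar as gv

t_array = np.linspace(0.0, 1.1, 100)
th_array = gv.gvar(1.5 * np.cos(2 * np.pi * t_array), 0.05 * np.ones(100))
show_plot(t_array, th_array)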
Example #32
def show_plot(t_array, th_array):
    """ Display theta vs t plot. """
    th_mean = gv.mean(th_array)
    th_sdev = gv.sdev(th_array)
    thp = th_mean + th_sdev
    thm = th_mean - th_sdev
    plt.fill_between(t_array, thp, thm, color='r', alpha=0.2) # color='0.8')
    plt.plot(t_array, th_mean, linewidth=0.5, color='r')
    plt.xlabel('$t$')
    plt.ylabel(r'$\theta(t)$')
    plt.savefig('case-pendulum.png', bbox_inches='tight')
    plt.show()
Example #33
def tabulate_avg(avgout,format=(6,3)):
    """ Tabulates averages and standard deviations.
        
    tabulate_avg(...) creates a nicely formatted table displaying the
    output from functions like ``dataset.Dataset.gdev``. Here ``avgout`` is
    the output. Parameter ``format`` specifies the output format:
    ``format=(N,D)`` implies that format ``'%N.Df(%Dd)'`` is used to print
    ``avg,int(10**D * std_dev)``. The table is returned as a single string,
    for printing.
    """
    table = []
    output = sorted(avgout.items())
    for tag,avsd in output:
        try:
            av = avsd.mean
            sd = avsd.sdev
        except AttributeError:
            av = gvar.mean(avsd)
            sd = gvar.sdev(avsd)
        lines = ''
        line = '%15s' % str(tag)
        try:
            sdfac = 10**format[1]
            fmt = (' %'+str(format[0])+'.'+str(format[1])+
                  'f(%'+str(format[1])+'d)')
            def avgfmt(av,sd,fmt=fmt,sdfac=sdfac):
                try:
                    return fmt % (av,int(sdfac*sd+0.5))
                except:
                    return (' %g (%.4g)' % (av,sd))
            ##
        except:
            def avgfmt(av,sd):
                return (' %g (%.4g)' % (av,sd))
            ##
        na = len(av)
        if len(sd)<na:
            na = len(sd)
        if na>=1:
            for i in range(na):
                if len(sd.shape)==2:
                    sdi = math.sqrt(sd[i][i])
                else:
                    sdi = sd[i]
                nextfield = avgfmt(av[i],sdi)
                if (len(nextfield)+len(line))>78:
                    lines = lines + line + '\n'
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
            table.append(lines + line +'\n')
    return '\n'.join(table)
Example #34
def avg_quark_prop(size_list=[8, 8, 8, 8],
                   cfgs_list=range(200, 1200, 50),
                   src_type='evenodd_wall',
                   t0=0,
                   color_list=[0],
                   ens_tag='',
                   prop_tag='eoprop_',
                   mass=0.5,
                   figname='',
                   **lat_kwargs):
    """
    This function loads the propagators calculated in :meth:`gauge_tools.examples.staggered_quark_prop`
    and averages them. Most key word arguments are similar to those of :meth:`gauge_tools.examples.staggered_quark_prop`,
    except for ``figname``, which if not an empyt string, is a signal to create a figure and save it as a pdf in `figname`.
    """
    fname_load = lambda ind_cfg: "{}{}m{}_{}.npy".format(
        ens_tag, prop_tag, mass, ind_cfg)
    fname_save = "{}{}m{}_avg.p".format(ens_tag, prop_tag, mass)
    import gauge_tools as gt
    lat = gt.lattice(*size_list, **lat_kwargs)
    props_list = []
    props_projected = []
    for n_cfg in cfgs_list:
        prop_v_field = np.load(fname_load(n_cfg), allow_pickle=True)
        props_list.append(prop_v_field)
        props_projected.append(
            gt.util.quark.propagator.ks_project_spatialmom(
                prop_v_field, color=color_list[0]))
    props_proj_gvar = gv.dataset.avg_data(props_projected)
    pickle.dump(
        dict(mean=gv.mean(props_proj_gvar), cov=gv.evalcov(props_proj_gvar)),
        open(fname_save, 'wb'))
    avg_quark_prop.props_list = props_list
    avg_quark_prop.props_proj_gvar = props_proj_gvar
    avg_quark_prop.lat = lat
    if figname != '' and PLOTS:
        plt.ion()
        fig = plt.figure()
        plt.errorbar(range(len(props_proj_gvar)),
                     np.abs(gv.mean(props_proj_gvar)),
                     gv.sdev(props_proj_gvar),
                     fmt='.',
                     label='interacting')
        free_theory(gt,
                    src_type=src_type,
                    t0=t0,
                    mass=mass,
                    color_list=color_list,
                    print_=False)
        plt.title('quark propagator in free and interacting theory')
        plt.legend()
        plt.yscale('log')
        fig.savefig(figname, format="pdf")
Example #35
def plot_static_potential(r, V, figname='', T_snap=2):
    fig = plt.figure(figsize=(4, 4))
    Y_mean = [V_r[T_snap] for V_r in gv.mean(V)]
    Y_sdev = [V_r[T_snap] for V_r in gv.sdev(V)]
    plt.errorbar(r, Y_mean, Y_sdev, fmt='.', capsize=4)
    plt.xlabel(r"$R/a$")
    plt.ylabel(r"$V(R)|_{%d,%d}$" % (T_snap, 1 + T_snap))
    plt.xlim([min(r) - 0.2, max(r) + 0.2])
    plt.grid()
    plt.tight_layout()
    if figname != '':
        fig.savefig(figname, format="pdf")
Example #36
 def plot_n2s(self, ax=None, **kwargs):
     """Plot the noise-to-signal ratio."""
     if ax is None:
         _, ax = plt.subplots(1)
     y = self.ydata
     y = gv.sdev(y) / gv.mean(y) * 100
     x = self.times.tdata
     ax = plt.mirror(ax=ax, x=x, y=y, **kwargs)
     ax.set_yscale("log")
     ax.set_ylabel("n/s [%]")
     ax.set_xlabel("t/a")
     return ax
Example #37
def make_plot(param, data, fit):
    import matplotlib.pyplot as plt 
    plt.cla()
    f = gv.cspline.CSpline(
        fit.p['mknot'], fit.p['fknot'], 
        )
    coliter = iter(['r', 'b', 'g'])
    m = np.arange(1, 9, 0.1)
    fm = f(m)
    fmavg = gv.mean(fm)
    fmplus = fmavg + gv.sdev(fm)
    fmminus = fmavg - gv.sdev(fm)    
    plt.fill_between(m, fmplus, fmminus, color='k', alpha=0.20) 
    plt.plot(m, fmavg, 'k:')
    # true function
    fm = 1. - .3 / m - .3 / m**2
    plt.plot(m, fm, 'k--')
    for s in data:
        plt.plot()
        ainv, am = param[s]
        ms = ainv * am 
        d = gv.mean(data[s])
        derr = gv.sdev(data[s])
        col = next(coliter)
        plt.errorbar(x=ms, y=d, yerr=derr, fmt=col + 'o')
        plt.text(ms[-1] - 0.6, d[-1], s, color=col, fontsize='x-large')
        fs = gv.mean(fm)
        ams = m / ainv
        idx = ams < am[-1]
        ams = ams[idx]
        fs = gv.mean(fm[idx])
        for i, ci in enumerate(fit.p['c']):
            fs += ci.mean * ams ** (2 * (i + 1))
        plt.plot(m[idx], fs, col + ':')
    plt.xlabel('m')
    plt.ylabel('f')
    plt.text(8, 0.65, 'f(m)', fontsize='x-large')
    plt.savefig('eg-spline.png', bbox_inches='tight')
    plt.show()
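
A standalone sketch of the gv.cspline.CSpline interpolation used above, with invented knot positions and values:

import numpy as np
import gvar as gv

mknot = np.array([1.0, 3.0, 6.0, 9.0])
fknot = gv.gvar(['0.66(2)', '0.87(2)', '0.94(2)', '0.96(2)'])
f = gv.cspline.CSpline(mknot, fknot)     # spline through gvar-valued knots

m = np.arange(1.0, 9.0, 0.5)
fm = f(m)                                # gvar-valued interpolation
print(gv.mean(fm)[:3], gv.sdev(fm)[:3])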
Example #38
def dump_results(output_dir: str,
                 fit: Fit,
                 model: CompartmentModel,
                 extend_days: int = 30):
    """Exports fit and model to pickle file, saves forecast as csv and saves plot
    """
    now_dir = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    dir_name = path.join(output_dir, now_dir)
    if not path.exists(dir_name):
        makedirs(dir_name, exist_ok=True)

    # Write fit to file. Can be read with gvar.load(file)
    dump(
        {
            "model": model,
            "fit": fit
        },
        outputfile=path.join(dir_name, "fit.pickle"),
    )

    # Extend day range for next steps
    xx = fit.x.copy()
    if extend_days:
        xx["dates"] = xx["dates"].union(
            date_range(xx["dates"].max(), freq="D", periods=extend_days))

    # Generate new prediction
    prediction_df = model.propagate_uncertainties(xx, fit.p)
    prediction_df.index = prediction_df.index.round("H")
    if model.fit_start_date:
        prediction_df = prediction_df.loc[model.fit_start_date:]

    # Dump forecast
    (prediction_df.stack().apply(
        lambda el: Series(dict(mean=mean(el), sdev=sdev(el)))).reset_index(
            level=1).rename(columns={
                "level_1": "kind"
            }).to_csv(path.join(dir_name, "forecast.csv")))

    # Dump plot
    fig = plot_fit(
        prediction_df,
        columns=(
            ("hospital_census", "vent_census"),
            ("hospital_admits", "vent_admits"),
        ),
        data={key: fit.y.T[ii]
              for ii, key in enumerate(model.fit_columns)},
    )
    fig.savefig(path.join(dir_name, "forecast.pdf"), bbox_inches="tight")
Example #39
 def test_inv(self):
     m = self.make_random([[1., 0.1], [0.1, 2.]])
     one = gv.gvar([['1(0)', '0(0)'], ['0(0)', '1(0)']])
     invm = linalg.inv(m)
     self.assertTrue(gv.equivalent(linalg.inv(invm), m))
     for mm in [invm.dot(m), m.dot(invm)]:
         np.testing.assert_allclose(
             gv.mean(mm), [[1, 0], [0, 1]], rtol=1e-10, atol=1e-10
             )
         np.testing.assert_allclose(
             gv.sdev(mm), [[0, 0], [0, 0]], rtol=1e-10, atol=1e-10
             )
     p = linalg.det(m) * linalg.det(invm)
     self.assertAlmostEqual(p.mean, 1.)
     self.assertGreater(1e-10, p.sdev)
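
A standalone sketch of the same inverse check, assuming the linalg used above is gvar's linalg submodule (the matrix entries are invented):

import gvar as gv
import gvar.linalg as linalg

m = gv.gvar([['2.0(1)', '0.3(1)'],
             ['0.3(1)', '1.5(1)']])
invm = linalg.inv(m)
print(gv.mean(m.dot(invm)))               # ~ 2x2 identity
print(linalg.det(m) * linalg.det(invm))   # ~ 1 with a tiny uncertainty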
Example #40
 def test_histogram(self):
     x = gv.gvar([5., 3.], [[4., 0.2], [0.2, 1.]])
     xsum = x[0] + x[1]
     integ = PDFIntegrator(x)
     hist = gv.PDFHistogram(xsum, nbin=40, binwidth=0.2)
     integ(neval=1000, nitn=5)
     def fhist(x):
         return hist.count(x[0] + x[1])
     r = integ(fhist, neval=1000, nitn=5, adapt=False)
     bins, prob, stat, norm = hist.analyze(r)
     self.assertTrue(abs(gv.mean(np.sum(prob)) - 1.) < 5. * gv.sdev(np.sum(prob)))
     self.assertTrue(abs(stat.mean.mean - xsum.mean) < 5. * stat.mean.sdev)
     self.assertTrue(abs(stat.sdev.mean - xsum.sdev) < 5. * stat.sdev.sdev)
     self.assertTrue(abs(stat.skew.mean) < 5. * stat.skew.sdev)
     self.assertTrue(abs(stat.ex_kurt.mean) < 5. * stat.ex_kurt.sdev)
Example #41
 def test_apply(self):
     " EigenBasis EigenBasis.apply EigenBasis.unapply "
     for tdata in [[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0, 1.0, 2.0]]:
         tdata = np.array(tdata)
         G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
         basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
         np.testing.assert_allclose(basis.E, self.E)
         newG = basis.apply(G, "{s1}{s2}")
         newG_mean = gv.mean(newG)
         np.testing.assert_allclose(newG_mean["00"], gv.exp(-self.E[0] * tdata))
         np.testing.assert_allclose(newG_mean["11"], gv.exp(-self.E[1] * tdata))
         np.testing.assert_allclose(newG_mean["01"], 0, atol=1e-10)
         np.testing.assert_allclose(newG_mean["10"], 0, atol=1e-10)
         oldG = basis.unapply(newG, "{s1}{s2}")
         for k in ["aa", "ab", "ba", "bb"]:
             np.testing.assert_allclose(gv.mean(oldG[k] - G[k]), 0, atol=1e-10)
             np.testing.assert_allclose(gv.sdev(oldG[k] - G[k]), 0, atol=1e-10)
Example #42
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = [ 0.1, 0.2, 0.3, 0.4,  0.5, 0.6,  0.7,  0.8,  0.9, 1.]
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])

    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = (2 * math.pi) ** 2 * gv.gvar(1, 0.1)
    prior['theta(0)'] = gv.gvar(math.pi / 2., 0.05)

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=t):
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    print(fit.format(maxline=True))
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2*math.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (math.pi / 2.))

    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4,3))
        # start plot with data
        plt.errorbar(
            x=t, y=gv.mean(theta), yerr=gv.sdev(theta),
            fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
Example #43
 def test_apply(self):
     " EigenBasis EigenBasis.apply EigenBasis.unapply "
     for tdata in [
         [1., 2., 3., 4.],
         [2., 4., 6., 8.],
         [0, 1., 2.],
         ]:
         tdata = np.array(tdata)
         G = self.make_G(tdata, keyfmt='{s1}{s2}', srcs='ab')
         basis = EigenBasis(
             data=G, keyfmt='{s1}{s2}', srcs='ab',
             t=2, tdata=tdata,
             )
         np.testing.assert_allclose(basis.E, self.E)
         newG = basis.apply(G, '{s1}{s2}')
         newG_mean = gv.mean(newG)
         np.testing.assert_allclose(newG_mean['00'], gv.exp(-self.E[0] * tdata))
         np.testing.assert_allclose(newG_mean['11'], gv.exp(-self.E[1] * tdata))
         np.testing.assert_allclose(newG_mean['01'], 0, atol=1e-10)
         np.testing.assert_allclose(newG_mean['10'], 0, atol=1e-10)
         oldG = basis.unapply(newG, '{s1}{s2}')
         for k in ['aa', 'ab', 'ba', 'bb']:
             np.testing.assert_allclose(gv.mean(oldG[k] - G[k]), 0, atol=1e-10)
             np.testing.assert_allclose(gv.sdev(oldG[k] - G[k]), 0, atol=1e-10)
Example #44
def plot_corr_normalized(models,data,fit,**kwargs):
 """
 Get all data ready so that it can be plotted on command
 Allows for dynamic cycling through plots
 """
 _fnNMod = len(models)
 _fnIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _fnDatCentral = []
 _fnDatError   = []
 _fnFitOnes    = []
 _fnFitError   = []
 #
 ## -- other objects
 _fnTDataNonZero = []
 _fnTFitNonZero  = []
 _fnTData        = []
 _fnTFit         = []
 _fnTRem         = [] # number of previous timeslices removed
 fig,ax = plt.subplots(1)
 #
 ## -- setup plot function
 def do_plot_normalized(idx,fig=fig):
   fig.clear()
   ax = fig.add_subplot(111)
   key = models[idx[0]].datatag

   ax.set_xlim([-1,len(_fnTData[idx[0]])])
   ax.set_ylim(utp.get_option("y_limit",[0.2,1.8],**kwargs[key]))
   #
   ## -- plot fit
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitOnes[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   ax.errorbar(_fnTDataNonZero[idx[0]],_fnDatCentral[idx[0]],yerr=_fnDatError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   ax.scatter(_fnTFitNonZero[idx[0]],
    [ _fnDatCentral[idx[0]][t] for t in
    list(np.array(_fnTFitNonZero[idx[0]])-np.array(_fnTRem[idx[0]])) ],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitlefn",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   ## -- modify some options 
   ax.set_xlabel(r'$t$')
   ax.set_ylabel(utp.get_option("yaxistitle",r"$C(t)/C_{fit}(t)$",**kwargs[key]))
   for item in ([ax.xaxis.label,ax.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("fn_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("fn_save_name","fnplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_normalized(event,idx=_fnIdx):
   #print('press_normalized', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _fnNMod
       do_plot_normalized(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("fn_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("fn_save_name","fnplot-"+key+".png",**kwargs[key])
         do_plot_normalized([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_normalized(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_normalized)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   _fnTData.append(model.tdata)
   _fnTFit.append(model.tfit)
   _fnTFit[-1] = np.append(_fnTFit[-1],list(sorted([len(_fnTData[-1]) - t for t in _fnTFit[-1]])))
   ## -- fit
   _fnFitFunc = utp.create_fit_func(model,fit)
   _fnFitMean = gv.mean(_fnFitFunc(_fnTData[-1]))
   _fnTDataNonZero.append([t for t in _fnTData[-1] if np.abs(_fnFitMean[t]) > 1e-20])
   _fnTFitNonZero.append([t for t in _fnTFit[-1] if np.abs(_fnFitMean[t]) > 1e-20])
   _fnTRem.append([(0 if np.abs(_fnFitMean[t]) > 1e-20 else 1) for t in model.tdata])
   _fnTRem[-1] = \
     [sum(_fnTRem[-1][:i+1]) for i in range(len(_fnTRem[-1])) if i in _fnTFitNonZero[-1]]
   _fnFitMean = gv.mean(_fnFitFunc(_fnTDataNonZero[-1]))
   _fnFitSdev = list(np.array(gv.sdev(_fnFitFunc(_fnTDataNonZero[-1])))/np.array(_fnFitMean))
   _fnFitOnes.append(list(np.ones(len(_fnTDataNonZero[-1]))))
   _fnFitError.append([ list(np.array(_fnFitOnes[-1])-np.array(_fnFitSdev)),
     list(np.array(_fnFitOnes[-1])+np.array(_fnFitSdev)) ])
   ## -- data
   _fnDatCentral.append( list(np.array([gv.mean(data[key])[t] for t in _fnTDataNonZero[-1]])/
     np.array(_fnFitMean)) )
   _fnDatSdev = ( np.array([gv.sdev(data[key])[t] for t in _fnTDataNonZero[-1]])/
     np.array(_fnFitMean) )
   _fnDatError.append([ list(_fnDatSdev), list(_fnDatSdev) ])
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_normalized([ix])
 else:
  do_plot_normalized(_fnIdx)
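## -- illustration (not part of the original module): a hypothetical helper that
## -- mirrors the normalization used above -- the data and the fit-error band are
## -- both divided by the fit central value, so the band is 1 +/- sdev(fit)/mean(fit);
## -- assumes numpy as np and gvar as gv, as imported elsewhere in this module
def _example_normalized_band(data_gv, fit_gv):
 """Return (ratio, ratio_err, (lower, upper)) for gvar arrays on common timeslices."""
 fit_mean  = np.array(gv.mean(fit_gv))
 rel_err   = np.array(gv.sdev(fit_gv)) / fit_mean
 ratio     = np.array(gv.mean(data_gv)) / fit_mean
 ratio_err = np.array(gv.sdev(data_gv)) / fit_mean
 return ratio, ratio_err, (1.0 - rel_err, 1.0 + rel_err)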
Example #45
0
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out","w"))
    for nexp in range(1, 11):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0) #, svdcut=SVDCUT)
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        if nexp > 5 and nexp < 10:
            print(".".center(73))
            continue
        elif nexp not in [1]:
            print("")
        print '************************************* nexp =',nexp
        print fit.format()                   # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
            print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]

    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x,y), fcn=f, prior=prior, p0=fit.pmean)
    sys.stdout = sys_stdout
    print fit
    # extra data 1
    print '\n--------------------- fit with extra information'
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    def ratio(p):
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1,1e-5), fcn=ratio, prior=fit.p)
    print (newfit)
    E = newfit.p['E']
    a = newfit.p['a']
    print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
    print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]

    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0' : gv.gvar(1,1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print 'chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof)
    print 'E:', new_p['E'][:4]
    print 'a:', new_p['a'][:4]
    print 'a1/a0:', new_p['a1/a0']

    if DO_BAYES:
        # Bayesian Fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x,y), fcn=f, prior=prior, p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        # print fit

        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=10000, nitn=10)

        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])
        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
                )
        r = expval(g, neval=10000, nitn=10, adapt=False)

        # print results
        print r.summary()
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print 'Results from Bayesian Integration:'
        print 'a0: mean =', means[0], '  sdev =', cov[0,0]**0.5
        print 'E0: mean =', means[1], '  sdev =', cov[1,1]**0.5
        print 'covariance from Bayesian integral =', np.array2string(cov, prefix=36 * ' ')
        print

        print 'Results from Least-Squares Fit:'
        print 'a0: mean =', fit.p['a'][0].mean, '  sdev =', fit.p['a'][0].sdev
        print 'E0: mean =', fit.p['E'][0].mean, '  sdev =', fit.p['E'][0].sdev
        print 'covariance from least-squares fit =', np.array2string(gv.evalcov([fit.p['a'][0], fit.p['E'][0]]), prefix=36*' ',precision=3)
        sys.stdout = sys_stdout

        # make histogram of E[0] probabilty
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()


    # # extra data 2
    # sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    # newfit = fit
    # for i in range(1):
    #     print '\n--------------------- fit with %d extra data sets' % (i+1)
    #     x, ynew = make_data()
    #     prior = newfit.p
    #     newfit = lsqfit.nonlinear_fit(data=(x,ynew), fcn=f, prior=prior) # , svdcut=SVDCUT)
    #     print newfit
    sys.stdout = sys_stdout
    # def fcn(x, p):
    #     return f(x, p), f(x, p)
    # prior = make_prior(nexp)
    # fit = lsqfit.nonlinear_fit(data=(x, [y, ynew]), fcn=fcn, prior=prior, p0=newfit.pmean) # , svdcut=SVDCUT)
    # print(fit)


    if DO_BOOTSTRAP:
        Nbs = 40                                     # number of bootstrap copies

        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]),np.std(outputs[k]))
        print 'Bootstrap results:'
        print 'E1/E0 =',outputs['E1/E0'],'  E2/E0 =',outputs['E2/E0']
        print 'a1/a0 =',outputs['a1/a0'],'  a2/a0 =',outputs['a2/a0']
        print 'E1 =',outputs['E1'],'  a1 =',outputs['a1']

    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x,fit.pmean)
        plt.xlim(4, 21)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.errorbar(x=x,y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()
Example #46
0
def error_function(c,tmax):
  ## -- signal to noise
  return -np.dot(gv.mean(c[1:tmax]),gv.mean(c[1:tmax]))/\
    np.dot(gv.sdev(c[1:tmax]),gv.sdev(c[1:tmax]))
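## -- illustration (not part of the original example): a hypothetical call to
## -- error_function on a toy correlator; assumes numpy as np and gvar as gv
## -- are imported as in the surrounding examples
#
# t = np.arange(16)
# c_toy = gv.gvar(np.exp(-0.5*t), 0.05*np.exp(-0.5*t))  # 5% errors per timeslice
# score = error_function(c_toy, tmax=12)  # more negative <=> larger signal-to-noise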
Example #47
0
def main():
    ### 1) least-squares fit to the data
    x = np.array([
        0.2, 0.4, 0.6, 0.8, 1.,
        1.2, 1.4, 1.6, 1.8, 2.,
        2.2, 2.4, 2.6, 2.8, 3.,
        3.2, 3.4, 3.6, 3.8
        ])
    y = gv.gvar([
        '0.38(20)', '2.89(20)', '0.85(20)', '0.59(20)', '2.88(20)',
        '1.44(20)', '0.73(20)', '1.23(20)', '1.68(20)', '1.36(20)',
        '1.51(20)', '1.73(20)', '2.16(20)', '1.85(20)', '2.00(20)',
        '2.11(20)', '2.75(20)', '0.86(20)', '2.73(20)'
        ])
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=fitfcn, extend=True)
    if LSQFIT_ONLY:
        sys.stdout = tee.tee(STDOUT, open('case-outliers-lsq.out', 'w'))
    elif not MULTI_W:
        sys.stdout = tee.tee(STDOUT, open('case-outliers.out', 'w'))
    print(fit)

    # plot data
    plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='o', c='b')

    # plot fit function
    xline = np.linspace(x[0], x[-1], 100)
    yline = fitfcn(xline, fit.p)
    plt.plot(xline, gv.mean(yline), 'k:')
    yp = gv.mean(yline) + gv.sdev(yline)
    ym = gv.mean(yline) - gv.sdev(yline)
    plt.fill_between(xline, yp, ym, color='0.8')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('case-outliers1.png', bbox_inches='tight')
    if LSQFIT_ONLY:
        return

    ### 2) Bayesian integral with modified PDF
    pdf = ModifiedPDF(data=(x, y), fcn=fitfcn, prior=prior)

    # integrator for expectation values with modified PDF
    expval = lsqfit.BayesIntegrator(fit, pdf=pdf)

    # adapt integrator to pdf
    expval(neval=1000, nitn=15)

    # evaluate expectation value of g(p)
    def g(p):
        w = 0.5 + 0.5 * p['2w-1']
        c = p['c']
        return dict(w=[w, w**2], mean=c, outer=np.outer(c,c))

    results = expval(g, neval=1000, nitn=15, adapt=False)
    print(results.summary())
    # expval.map.show_grid(15)

    if MULTI_W:
        sys.stdout = tee.tee(STDOUT, open('case-outliers-multi.out', 'w'))

    # parameters c[i]
    mean = results['mean']
    cov = results['outer'] - np.outer(mean, mean)
    c = mean + gv.gvar(np.zeros(mean.shape), gv.mean(cov))
    print('c =', c)
    print(
        'corr(c) =',
        np.array2string(gv.evalcorr(c), prefix=10 * ' '),
        '\n',
        )

    # parameter w
    wmean, w2mean = results['w']
    wsdev = gv.mean(w2mean - wmean ** 2) ** 0.5
    w = wmean + gv.gvar(np.zeros(np.shape(wmean)), wsdev)
    print('w =', w, '\n')

    # Bayes Factor
    print('logBF =', np.log(expval.norm))
    sys.stdout = STDOUT

    if MULTI_W:
        return

    # add new fit to plot
    yline = fitfcn(xline, dict(c=c))
    plt.plot(xline, gv.mean(yline), 'r--')
    yp = gv.mean(yline) + gv.sdev(yline)
    ym = gv.mean(yline) - gv.sdev(yline)
    plt.fill_between(xline, yp, ym, color='r', alpha=0.2)
    plt.savefig('case-outliers2.png', bbox_inches='tight')
Example #48
0
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(3,5):
        print '************************************* nexp =',nexp
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0)
        print fit                   # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    sys_stdout = sys.stdout
    if DO_ERRORBUDGET:

        lines = [
            "E = fit.p['E']",
            "a = fit.p['a']",
            "print(E[1] / E[0])",
            "print((E[1] / E[0]).partialsdev(fit.prior['E']))",
            "print((E[1] / E[0]).partialsdev(fit.prior['a']))",
            "print((E[1] / E[0]).partialsdev(y))"
            ]
        sys.stdout = tee.tee(sys_stdout, open("eg4c.out","w"))
        for line in lines:
            print ">>>", line
            if line[:5] == "print":
                print(eval(line[5:]))
        # print E[1]/E[0]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(y)
        outputs = {'E1/E0':E[1]/E[0], 'E2/E0':E[2]/E[0],
                 'a1/a0':a[1]/a[0], 'a2/a0':a[2]/a[0]}
        inputs = {'E':fit.prior['E'],'a':fit.prior['a'],'y':y}

        sys.stdout = tee.tee(sys_stdout, open("eg4b.out","w"))
        print fit.fmt_values(outputs)
        print fit.fmt_errorbudget(outputs,inputs)
        sys.stdout = sys_stdout

    if DO_SIMULATIONS:
        # fit simulations
        sys.stdout = tee.tee(sys_stdout, open("eg4d.out","w"))

        for sfit in fit.simulated_fit_iter(3):
            print '************************************* simulation'
            print(sfit)
            sE = sfit.p['E']             # best-fit parameters
            sa = sfit.p['a']
            E = sfit.pexact['E']
            a = sfit.pexact['a']
            print 'E1/E0 =', sE[1] / sE[0], '  E2/E0 =', sE[2] / sE[0]
            print 'a1/a0 =', sa[1] / sa[0], '  a2/a0 =', sa[2] / sa[0]
            print '\nSimulated Fit Values - Exact Values:'
            print 'E1/E0:', (sE[1] / sE[0]) - (E[1] / E[0]),\
               '  E2/E0:', (sE[2] / sE[0]) - (E[2] / E[0])
            print 'a1/a0:', (sa[1] / sa[0]) - (a[1] / a[0]),\
               '  a2/a0:', (sa[2] / sa[0]) - (a[2] / a[0])

            # compute chi**2 comparing fit results to exact results
            sim_results = [sE[0], sE[1], sa[0], sa[1]]
            exact_results = [E[0], E[1], a[0], a[1]]
            chi2 = gv.chi2(sim_results, exact_results, svdcut=1e-8)
            print '\nParameter chi2/dof [dof] = %.2f' % (chi2/chi2.dof), '[%d]' % chi2.dof, '  Q = %.1f' % chi2.Q
            print
        sys.stdout = sys_stdout

    if DO_EMPBAYES:
        def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5,0.5*z[0]) for i in range(nexp)]
            return dict(prior=prior,data=data,fcn=f,p0=p0)
        ##
        z0 = [0.0]
        fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4a.out","w"))
        print fit                   # print the optimized fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        # print "prior['a'] =",fit.prior['a'][0]
        sys.stdout = sys_stdout
        print

    if DO_PLOT:
        import pylab as pp
        from gvar import mean,sdev
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
def plot_corr_adv_dl_folded(models,data,fit,req=None,**kwargs):
 """
 Get all data ready so that it can be plotted on command.
 Allows for dynamic cycling through plots.
 """
 _dfNMod = len(models)
 _dfIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Hi/Lo corresponds to the positive/negative half of the fit plot
 ##  - Central/Error are the central value and errors
 _dfDatHiCentral = []
 _dfDatHiError   = []
 _dfDatLoCentral = []
 _dfDatLoError   = []
 _dfFitHiCentral = []
 _dfFitHiError   = []
 _dfFitLoCentral = []
 _dfFitLoError   = []
 #
 ## -- other objects
 _dfTData = []
 _dfTFit  = []
 fig,axp = plt.subplots(1,figsize=(8,8))
 #
 ## -- setup plot function
 def do_plot_adv_dl_folded(idx,fig=fig):
   fig.clear()
   axp = fig.add_subplot(111)
   key = models[idx[0]].datatag

   axp.set_yscale('log')
   axp.set_xlim([-1,len(_dfTData[idx[0]])])
   axp.set_ylim(utp.get_option("y_pos_limit",[1e-8,1e0],**kwargs[key]))
   plt.sca(axp)
   expp = [int(np.floor(np.log10(np.abs(x)))) for x in plt.yticks()[0][2:]]
   expp = ['$10^{'+str(x)+'}$' for x in expp]
   plt.yticks(plt.yticks()[0][2:],expp)
   axp.tick_params(axis='both', which='major', labelsize=20)
   #
   ## -- plot fit
   axp.plot(_dfTData[idx[0]],_dfFitHiCentral[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   axp.plot(_dfTData[idx[0]],_dfFitHiError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   axp.plot(_dfTData[idx[0]],_dfFitHiError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   (_,caps,_) = axp.errorbar(_dfTData[idx[0]],_dfDatHiCentral[idx[0]],yerr=_dfDatHiError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",9,**kwargs[key]),
    capsize=6, elinewidth=2)
   for cap in caps:
     cap.set_markeredgewidth(1)
   axp.scatter(_dfTFit[idx[0]],[_dfDatHiCentral[idx[0]][t] for t in _dfTFit[idx[0]]],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",81,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitledf",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   ## -- modify some options 
   axp.set_xlabel(r'$t$',fontsize=10)
   axp.set_ylabel(utp.get_option("yaxistitle",r"$C(t)$",**kwargs[key]),
    #fontsize=10,rotation=0,position=(0.01,0.04),labelpad=10)
    fontsize=10,rotation=0)
   axp.yaxis.set_label_coords(-0.07,0.04)
   for item in ([axp.xaxis.label,axp.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",30,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("df_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("df_save_name","dfplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
    #mng = plt.get_current_fig_manager()
    #mng.resize(*mng.window.maxsize())
    #fig.set_size_inches(5,12)
    #save_dir  = utp.get_option("df_save_dir","./plotdump",**kwargs[key])
    #save_name = utp.get_option("df_save_name","dfplot-"+key+".pdf",**kwargs[key])
    #fig.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_adv_dl_folded(event,idx=_dfIdx):
   #print('press_adv_dl_folded', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _dfNMod
       do_plot_adv_dl_folded(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _dfNMod
       do_plot_adv_dl_folded(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _dfNMod
       do_plot_adv_dl_folded(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("df_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("df_save_name","dfplot-"+key+".pdf",**kwargs[key])
         do_plot_adv_dl_folded([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_adv_dl_folded(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_adv_dl_folded)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   _dfTData.append([t for t in model.tdata if t < len(model.tdata)/2+1])
   _dfTFit.append([t for t in model.tfit if t < len(model.tdata)/2+1])
   ## -- fit
   if req is None:
    _dfFitFunc = utp.mask_fit_fcn_adv(model,fit,invert=True)
   else:
    _dfFitFunc = utp.mask_fit_fcn_adv(model,fit,req=req,invert=False)
   _dfFitMean = gv.mean(_dfFitFunc(np.array(_dfTData[-1])))
   _dfFitSdev = gv.sdev(_dfFitFunc(np.array(_dfTData[-1])))
   #print 'fit tdat   : ',_dfTData[-1]
   #print 'fit mean   : ',_dfFitMean
   #print 'fit sdev   : ',_dfFitSdev
   _dfFitHiCentral.append(
     utf.pos_arr(_dfFitMean,utp.get_option("y_pos_limit",[1e-4,1e0],**kwargs[key])[0]/100) )
   _dfFitHiError.append([
     utf.pos_arr(np.array(_dfFitMean)-np.array(_dfFitSdev),
     utp.get_option("y_pos_limit",[1e-4,1e0],**kwargs[key])[0]/1000),
     utf.pos_arr(np.array(_dfFitMean)+np.array(_dfFitSdev),
     utp.get_option("y_pos_limit",[1e-4,1e0],**kwargs[key])[0]/1000) ])
   ## -- data
   if req is None:
    _dfSubFunc = utp.mask_fit_fcn_adv(model,fit,invert=False)
   else:
    _dfSubFunc = utp.mask_fit_fcn_adv(model,fit,req=req,invert=True)
   asym = (model.tp < 0)
   _dfSub = _dfSubFunc(np.array(_dfTData[-1]))
   _dfDatMean = gv.mean(utf.fold_data(data[key],asym)-_dfSub)
   _dfDatSdev = gv.sdev(utf.fold_data(data[key],asym)-_dfSub)
   #print 'data       : ',utf.fold_data(data[key],asym)
   #print 'subtraction: ',_dfSub
   #print 'new        : ',gv.gvar(_dfDatMean,_dfDatSdev)
   _dfDatHiCentral.append( utf.pos_arr(_dfDatMean) )
   _dfDatHiError.append(utf.pos_err(_dfDatMean,_dfDatSdev))
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_adv_dl_folded([ix])
 else:
  do_plot_adv_dl_folded(_dfIdx)
def plot_corr_double_log(models,data,fit,**kwargs):
 """
 Get all data ready so that it can be plotted on command.
 Allows for dynamic cycling through plots.
 """
 _dlNMod = len(models)
 _dlIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Hi/Lo corresponds to the positive/negative half of the fit plot
 ##  - Central/Error are the central value and errors
 _dlDatHiCentral = []
 _dlDatHiError   = []
 _dlDatLoCentral = []
 _dlDatLoError   = []
 _dlFitHiCentral = []
 _dlFitHiError   = []
 _dlFitLoCentral = []
 _dlFitLoError   = []
 #
 ## -- other objects
 _dlTData = []
 _dlTFit  = []
 fig,(axp,axm) = plt.subplots(2,sharex=True,figsize=(8,16))
 #
 ## -- setup plot function
 def do_plot_double_log(idx,fig=fig):
   fig.clear()
   axp = fig.add_subplot(211)
   axm = fig.add_subplot(212,sharex=axp)
   fig.subplots_adjust(hspace=0)
   key = models[idx[0]].datatag

   axp.set_yscale('log')
   axm.set_yscale('log')
   axp.set_xlim([-1,len(_dlTData[idx[0]])])
   axm.set_xlim([-1,len(_dlTData[idx[0]])])
   axp.set_ylim(utp.get_option("y_pos_limit",[1e-8,1e0],**kwargs[key]))
   axm.set_ylim(utp.get_option("y_neg_limit",[1e-8,1e0],**kwargs[key]))
   axm.set_ylim(axm.get_ylim()[::-1])
   plt.sca(axp)
   expp = [int(np.floor(np.log10(np.abs(x)))) for x in plt.yticks()[0][2:]]
   expp = ['$10^{'+str(x)+'}$' for x in expp]
   plt.yticks(plt.yticks()[0][2:],expp)
   plt.sca(axm)
   expm = [int(np.floor(np.log10(np.abs(x)))) for x in plt.yticks()[0][2:]]
   expm = ['$-10^{'+str(x)+'}$' for x in expm]
   plt.yticks(plt.yticks()[0][2:],expm)
   #
   ## -- plot fit
   axp.plot(_dlTData[idx[0]],_dlFitHiCentral[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   axm.plot(_dlTData[idx[0]],_dlFitLoCentral[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   axp.plot(_dlTData[idx[0]],_dlFitHiError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   axm.plot(_dlTData[idx[0]],_dlFitLoError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   axp.plot(_dlTData[idx[0]],_dlFitHiError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   axm.plot(_dlTData[idx[0]],_dlFitLoError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   axp.errorbar(_dlTData[idx[0]],_dlDatHiCentral[idx[0]],yerr=_dlDatHiError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   axm.errorbar(_dlTData[idx[0]],_dlDatLoCentral[idx[0]],yerr=_dlDatLoError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   axp.scatter(_dlTFit[idx[0]],[_dlDatHiCentral[idx[0]][t] for t in _dlTFit[idx[0]]],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   axm.scatter(_dlTFit[idx[0]],[_dlDatLoCentral[idx[0]][t] for t in _dlTFit[idx[0]]],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitledl",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   ## -- modify some options 
   axm.set_xlabel(r'$t$')
   axm.set_ylabel(utp.get_option("yaxistitle",r"$C(t)$",**kwargs[key]),
    fontsize=30,rotation=0,position=(0.05,0.98))
   for item in ([axm.xaxis.label,axm.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("dl_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("dl_save_name","dlplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
    #mng = plt.get_current_fig_manager()
    #mng.resize(*mng.window.maxsize())
    #fig.set_size_inches(5,12)
    #save_dir  = utp.get_option("dl_save_dir","./plotdump",**kwargs[key])
    #save_name = utp.get_option("dl_save_name","dlplot-"+key+".pdf",**kwargs[key])
    #fig.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_double_log(event,idx=_dlIdx):
   #print('press_double_log', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _dlNMod
       do_plot_double_log(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _dlNMod
       do_plot_double_log(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _dlNMod
       do_plot_double_log(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("dl_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("dl_save_name","dlplot-"+key+".pdf",**kwargs[key])
         do_plot_double_log([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_double_log(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_double_log)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   _dlTData.append(model.tdata)
   _dlTFit.append(model.tfit)
   _dlTFit[-1] = np.append(_dlTFit[-1],list(sorted([len(_dlTData[-1]) - t for t in _dlTFit[-1]])))
   ## -- fit
   _dlFitFunc = utp.create_fit_func(model,fit)
   _dlFitMean = gv.mean(_dlFitFunc(_dlTData[-1]))
   _dlFitSdev = gv.sdev(_dlFitFunc(_dlTData[-1]))
   _dlFitHiCentral.append(
     utf.pos_arr(_dlFitMean,utp.get_option("y_pos_limit",[1e-8,1e0],**kwargs[key])[0]/100) )
   _dlFitLoCentral.append(
     utf.neg_arr(_dlFitMean,utp.get_option("y_neg_limit",[1e-8,1e0],**kwargs[key])[0]/100) )
   _dlFitHiError.append([
     utf.pos_arr(np.array(_dlFitMean)-np.array(_dlFitSdev),
     utp.get_option("y_pos_limit",[1e-8,1e0],**kwargs[key])[0]/1000),
     utf.pos_arr(np.array(_dlFitMean)+np.array(_dlFitSdev),
     utp.get_option("y_pos_limit",[1e-8,1e0],**kwargs[key])[0]/1000) ])
   _dlFitLoError.append([
     utf.neg_arr(np.array(_dlFitMean)-np.array(_dlFitSdev),
     utp.get_option("y_neg_limit",[1e-8,1e0],**kwargs[key])[0]/1000),
     utf.neg_arr(np.array(_dlFitMean)+np.array(_dlFitSdev),
     utp.get_option("y_neg_limit",[1e-8,1e0],**kwargs[key])[0]/1000) ])
   ## -- data
   _dlDatMean = gv.mean(data[key])
   _dlDatSdev = gv.sdev(data[key])
   _dlDatHiCentral.append( utf.pos_arr(_dlDatMean) )
   _dlDatLoCentral.append( utf.neg_arr(_dlDatMean) )
   _dlDatHiError.append(utf.pos_err(_dlDatMean,_dlDatSdev))
   _dlDatLoError.append(utf.neg_err(_dlDatMean,_dlDatSdev))
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_double_log([ix])
 else:
  do_plot_double_log(_dlIdx)
def plot_corr_effective_mass(models,data,fit=None,req=[list(),list()],**kwargs):
 """
 Get all data ready so that it can be plotted on command.
 Allows for dynamic cycling through plots.
 """
 _emNMod = len(models)
 _emIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _emLogRatio        = []
 _emLogRatioCentral = []
 _emLogRatioError   = []
 _emLogRatioFit     = []
 _emFoldRatio       = []
 _emFoldRatioCentral= []
 _emFoldRatioError  = []
 _emFoldRatioFit    = []
 _emRatioFit        = []
 _emRatioFitNonZero = []
 _emFitCentral      = []
 _emFitError        = []
 ## -- timeslice objects
 _emTPosRatio    = []
 _emTPosFit      = []
 _emTPosFold     = []
 _emTPosFoldFit  = []
 #
 ## -- fitting options
 _emFitMin = 0
 _emFitMax = 0
 _emSep    = 0
 fig,ax = plt.subplots(1,figsize=(10,8))
 #
 ## -- setup plot function
 def do_plot_corr_effective_mass(idx,fig=fig):
   fig.clear()
   #fig.set_size_inches(10,10) #doesn't work
   ax = fig.add_subplot(111)
   if print_quality:
    plt.subplots_adjust(bottom=0.15,left=0.18,right=0.97,top=0.95)
   else:
    #plt.subplots_adjust(bottom=0.15,left=0.15,right=0.97,top=0.95)
    pass
   #fig.set_figheight(20) #doesn't work
   #fig.set_figwidth(20)
   key = models[idx[0]].datatag
   ax.set_xlim([-1,df.cor_len/2])
   ax.set_ylim(utp.get_option("y_limit",[0.0,1.4],**kwargs[key]))
   #
   # -- plot correlator data
   if utp.get_option("meff_do_fold",False,**kwargs[key]):
    ## -- plot fit
    ax.plot([t for t in _emTPosFit[idx[0]] if t < len(_emTData)/2],_emFitCentral[idx[0]],
     color=utp.get_option("color3",'b',**kwargs[key]))
    ax.plot([t for t in _emTPosFit[idx[0]] if t < len(_emTData)/2],_emFitError[idx[0]][0],
     color=utp.get_option("color3",'b',**kwargs[key]),
     ls=utp.get_option("linestyle2",'--',**kwargs[key]))
    ax.plot([t for t in _emTPosFit[idx[0]] if t < len(_emTData)/2],_emFitError[idx[0]][1],
     color=utp.get_option("color3",'b',**kwargs[key]),
     ls=utp.get_option("linestyle2",'--',**kwargs[key]))
    ## -- _emTPosRatio not necessarily symmetric, get times correct
    ax.errorbar(_emTPosFold[idx[0]],_emFoldRatioCentral[idx[0]],yerr=_emFoldRatioError[idx[0]],
     mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
     mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     ls=utp.get_option("linestyle1",'None',**kwargs[key]),
     marker=utp.get_option("marker1",'o',**kwargs[key]),
     ms=utp.get_option("markersize",10,**kwargs[key]))
    ax.scatter(_emTPosFoldFit[idx[0]],gv.mean(_emFoldRatioFit[idx[0]]),
     color=utp.get_option("color1",'r',**kwargs[key]),
     marker=utp.get_option("marker",'o',**kwargs[key]),
     s=utp.get_option("markersize",100,**kwargs[key]))
   else:
    ## -- plot fit
    ax.plot(_emTPosFit[idx[0]],_emFitCentral[idx[0]],
     color=utp.get_option("color3",'b',**kwargs[key]))
    ax.plot(_emTPosFit[idx[0]],_emFitError[idx[0]][0],
     color=utp.get_option("color3",'b',**kwargs[key]),
     ls=utp.get_option("linestyle2",'--',**kwargs[key]))
    ax.plot(_emTPosFit[idx[0]],_emFitError[idx[0]][1],
     color=utp.get_option("color3",'b',**kwargs[key]),
     ls=utp.get_option("linestyle2",'--',**kwargs[key]))
    ## -- 
    ax.errorbar(_emTPosRatio[idx[0]],_emLogRatioCentral[idx[0]],yerr=_emLogRatioError[idx[0]],
     mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
     mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     ls=utp.get_option("linestyle1",'None',**kwargs[key]),
     marker=utp.get_option("marker1",'o',**kwargs[key]),
     ms=utp.get_option("markersize",8,**kwargs[key]))
    ax.scatter(_emTPosFit[idx[0]],gv.mean(_emLogRatioFit[idx[0]]),
     color=utp.get_option("color1",'r',**kwargs[key]),
     marker=utp.get_option("marker",'o',**kwargs[key]),
     s=utp.get_option("markersize",64,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitleem",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   # -- modify some options 
   if print_quality:
    ax.set_xlabel(r'$t$',fontsize=40)
    if sn_ratio:
     ax.set_ylabel(utp.get_option("yaxistitle",
      r"$-\frac{1}{"+str(_emSep)+r"}v_{i}^{T}\,log\frac{C_{ij}(t+"+str(_emSep)+r")}{C_{ij}(t)}w_{j}$",
      **kwargs[key]),fontsize=40)
    else:
     ax.set_ylabel(utp.get_option("yaxistitle",
      r"$-\frac{1}{"+str(_emSep)+r"}\,log\frac{C_{ij}(t+"+str(_emSep)+r")}{C_{ij}(t)}$",
      **kwargs[key]),fontsize=40)
    plt.xticks(plt.xticks()[0],fontsize=24)
    plt.yticks(plt.yticks()[0],fontsize=24)
   else:
    ax.set_xlabel(r'$t$')
    ax.set_ylabel(utp.get_option("yaxistitle",
     r"$-\frac{1}{"+str(_emSep)+r"}\,log\frac{C(t+"+str(_emSep)+r")}{C(t)}$",**kwargs[key]))
    for item in ([ax.xaxis.label,ax.yaxis.label]):
     # must be after setting label content (LaTeX ruins it)
     item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("em_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("em_save_name","emplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_effective_mass(event,idx=_emIdx):
   #print('press_effective_mass', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _emNMod
       do_plot_corr_effective_mass(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _emNMod
       do_plot_corr_effective_mass(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _emNMod
       do_plot_corr_effective_mass(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("em_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("em_save_name","emplot-"+key+".png",**kwargs[key])
         do_plot_corr_effective_mass([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_corr_effective_mass(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_effective_mass)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   ## -- parameters used in fitting
   ##    default = 2
   _emSep    = utp.get_option("meff_sep",2,**kwargs[key])
   ##    default = smallest t not included in fit
   _emFitMin = utp.get_option("meff_fit_min",model.tfit[-1]+1,**kwargs[key])
   ##    default = midpoint - sep
   _emFitMax = utp.get_option("meff_fit_max",
     model.tdata[-1]/2-_emSep-int(model.tdata[-1]/12),**kwargs[key])
   _emTData = model.tdata
   _emTFit = range(_emFitMin,_emFitMax)
   _emTFit = np.append(_emTFit,list(sorted([len(_emTData) - t for t in _emTFit])))
   if not(fit is None) and (len(req[0]) > 0 or len(req[1]) > 0):
     _emSubFcn = utp.mask_fit_fcn(model,fit,req,invert=False)
     _emSub = np.array(_emSubFcn(np.array(_emTData)))
   else:
     _emSub = 0
   _emDataFold = utf.fold_data(data[key]-_emSub,(model.tp < 0))
   #_emTDataNonZero = [t for t in _emTData if np.abs(gv.mean(data[key])[t]) > 1e-20]
   #_emTDataNonZero = [t for t in range(len(_emDataFold)) if np.mean(_emDataFold[t]) > 1e-20]
   #
   ## -- data
   _emRatio = np.array(_emDataFold[_emSep:])/np.array(_emDataFold[:-_emSep])
   _emTDataRatio = np.array(range(len(_emRatio)))
   _emTDataNonZero = np.array([t for t in range(len(_emRatio)) if _emRatio[t] > 1e-20])
   #_emRatio = np.array([-gv.log(x)/_emSep for x in _emRatio if x > 1e-20])
   #_emTDataRatio =\
   # [t for t in _emTData
   # if (t in _emTDataNonZero) and (t <= _emTData[-1]/2 +1 - _emSep) ]   # first half
   #_emTDataRatio = np.append(_emTDataRatio,
   # [t for t in _emTData
   # if (t in _emTDataNonZero) and (t >= _emTData[-1]/2 +1 + _emSep) ] ) # second half
   #_emRatio =\
   # [data[key][t+_emSep]/data[key][t] for t in _emTData
   # if (t in _emTDataNonZero) and (t <= _emTData[-1]/2 +1 - _emSep) ]   # first half
   #_emRatio = np.append(_emRatio,
   # [data[key][t-_emSep]/data[key][t] for t in _emTData
   # if (t in _emTDataNonZero) and (t >= _emTData[-1]/2 +1 + _emSep) ] ) # second half
   ## -- times
   _emTPosRatio.append(
    [_emTDataRatio[t] for t in range(len(_emTDataRatio)) if _emRatio[t] > 0] )
   _emTPosFit.append(
    [_emTDataRatio[t] for t in range(len(_emTDataRatio))
    if _emRatio[t] > 0 and _emTDataRatio[t] in _emTFit] )
   ## -- ratios 
   _emLogRatio.append(
    [-gv.log(_emRatio[t])/_emSep for t in range(len(_emTDataRatio)) if _emRatio[t] > 0] )
   _emLogRatioFit.append(
    [-gv.log(_emRatio[t])/_emSep for t in range(len(_emTDataRatio))
    if (gv.mean(_emRatio[t]) > 0) and (_emTDataRatio[t] in _emTPosFit[-1])] )
   _emFoldRatio.append(_emLogRatio[-1])
   _emFoldRatioFit.append(_emLogRatioFit[-1])
   _emTPosFold.append(_emTPosRatio[-1])
   _emTPosFoldFit.append(_emTPosFit[-1])
   ## -- folding
   #_emFoldRatio.append(list())
   #_emFoldRatioFit.append(list())
   #_emTPosFold.append(list())
   #_emTPosFoldFit.append(list())
   #for t in range(1,len(_emTData)/2):
   # if not(t in _emTPosRatio[-1]) or not(len(_emTData)-t in _emTPosRatio[-1]):
   #  continue
   # _emFoldRatio[-1].append((_emLogRatio[-1][list(_emTPosRatio[-1]).index(t)]
   #   +_emLogRatio[-1][list(_emTPosRatio[-1]).index(len(_emTData)-t)])/2)
   # _emTPosFold[-1].append(t)
   # if not(t in _emTPosFit[-1]) or not(len(_emTData)-t in _emTPosFit[-1]):
   #  continue
   # _emFoldRatioFit[-1].append((_emLogRatio[-1][list(_emTPosRatio[-1]).index(t)]
   #   +_emLogRatio[-1][list(_emTPosRatio[-1]).index(len(_emTData)-t)])/2)
   # _emTPosFoldFit[-1].append(t)
   #for t in range(len(_emTPosFold[-1])):
   # print t,_emTPosFold[-1][t],_emFoldRatio[-1][t]
   #for t in range(len(_emTPosFoldFit[-1])):
   # print t,_emTPosFoldFit[-1][t],_emFoldRatioFit[-1][t]
   _emLogRatioCentral.append(gv.mean(_emLogRatio[-1]))
   _emLogRatioError.append([ list(gv.sdev(_emLogRatio[-1])), list(gv.sdev(_emLogRatio[-1])) ])
   _emFoldRatioCentral.append(gv.mean(_emFoldRatio[-1]))
   _emFoldRatioError.append([ list(gv.sdev(_emFoldRatio[-1])), list(gv.sdev(_emFoldRatio[-1])) ])
   ## -- fit
   _emRatioFit.append(lsq.wavg(_emLogRatioFit[-1]))
   if utp.get_option("meff_do_fold",False,**kwargs[key]):
    _emFitCentral.append([gv.mean(_emRatioFit[-1]) for t in _emTPosFit[-1] if t < len(_emTData)/2])
    _emFitError.append(
     [list(np.array(_emFitCentral[-1])-np.array([gv.sdev(_emRatioFit[-1])
       for t in _emTPosFit[-1] if t < len(_emTData)/2])),
      list(np.array(_emFitCentral[-1])+np.array([gv.sdev(_emRatioFit[-1])
        for t in _emTPosFit[-1] if t < len(_emTData)/2]))])
   else:
    _emFitCentral.append([gv.mean(_emRatioFit[-1]) for t in _emTPosFit[-1]])
    _emFitError.append(
     [list(np.array(_emFitCentral[-1])-np.array([gv.sdev(_emRatioFit[-1])
       for t in _emTPosFit[-1]])),
      list(np.array(_emFitCentral[-1])+np.array([gv.sdev(_emRatioFit[-1])
        for t in _emTPosFit[-1]]))])
 print "Best plateau fits: "
 for key,rfit in zip([model.datatag for model in models],_emRatioFit):
  print "  ",key," : ",rfit
 print "   ----------------- "
 _emRatioFitNonZero = [x for x in _emRatioFit if not(x is None)]
 print "   avg  : ",lsq.wavg(_emRatioFitNonZero)

 ## -- done saving data
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_corr_effective_mass([ix])
 else:
  do_plot_corr_effective_mass(_emIdx)
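## -- illustration (not part of the original module): a minimal standalone sketch of
## -- the effective-mass ratio plotted above, m_eff(t) = -log(C(t+sep)/C(t))/sep,
## -- for a single gvar-valued correlator; assumes numpy as np and gvar as gv
def _example_effective_mass(corr, sep=2):
 """Return (times, m_eff) for timeslices where the correlator ratio is positive."""
 ratio = np.array(corr[sep:]) / np.array(corr[:-sep])
 times = [t for t in range(len(ratio)) if gv.mean(ratio[t]) > 0]
 meff  = [-gv.log(ratio[t]) / sep for t in times]
 return times, meff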
def plot_corr_effective_mass_check(models,data,fit=None,**kwargs):
 """
 Get all data ready so that it can be plotted on command.
 Allows for dynamic cycling through plots.
 """
 _emNMod = len(models)
 _emIdx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _emLogRatio        = []
 _emLogRatioCentral = []
 _emLogRatioError   = []
 _emLogRatioFit     = []
 _emRatioFit        = []
 _emRatioFitNonZero = []
 _emFitCentral      = []
 _emFitError        = []
 ## -- timeslice objects
 _emTPosRatio    = []
 _emTPosFit      = []
 #
 ## -- fitting options
 _emFitMin = 0
 _emFitMax = 0
 _emSep    = 0
 fig,ax = plt.subplots(1)
 #
 ## -- setup plot function
 def do_plot_corr_effective_mass_check(idx,fig=fig):
   fig.clear()
   ax = fig.add_subplot(111)
   key = models[idx[0]].datatag
   ax.set_ylim(utp.get_option("y_limit",[0.0,1.2],**kwargs[key]))
   #
   ## -- plot fit
   ax.plot(_emTPosFit[idx[0]],_emFitCentral[idx[0]],
    color=utp.get_option("color3",'b',**kwargs[key]))
   ax.plot(_emTPosFit[idx[0]],_emFitError[idx[0]][0],
    color=utp.get_option("color3",'b',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ax.plot(_emTPosFit[idx[0]],_emFitError[idx[0]][1],
    color=utp.get_option("color3",'b',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot reference
   for val in df.ec_reference_lines:
     vt = [val for t in _emTPosFit[idx[0]]]
     ax.plot(_emTPosFit[idx[0]],vt,
      color=utp.get_option("color2",'g',**kwargs[key]))
   # -- plot correlator data
   ax.errorbar(_emTPosRatio[idx[0]],_emLogRatioCentral[idx[0]],yerr=_emLogRatioError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   ax.scatter(_emTPosFit[idx[0]],gv.mean(_emLogRatioFit[idx[0]]),
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitle",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   # -- modify some options 
   ax.set_xlabel(r'$t$ slice')
   ax.set_ylabel(utp.get_option("yaxistitle",
    r"$-\frac{1}{"+str(_emSep)+r"}\,log\frac{C(t+"+str(_emSep)+r")}{C(t)}$",**kwargs[key]))
   for item in ([ax.xaxis.label,ax.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect =fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    save_dir  = utp.get_option("ec_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("ec_save_name","ecplot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_effective_mass(event,idx=_emIdx):
   #print('press_effective_mass', event.key)
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _emNMod
       do_plot_corr_effective_mass_check(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _emNMod
       do_plot_corr_effective_mass_check(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _emNMod
       do_plot_corr_effective_mass_check(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("ec_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("ec_save_name","ecplot-"+key+".png",**kwargs[key])
         do_plot_corr_effective_mass_check([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_corr_effective_mass_check(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_effective_mass)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   ## -- parameters used in fitting
   ##    default = 2
   _emSep    = utp.get_option("meff_sep",2,**kwargs[key])
   ##    default = smallest t not included in fit
   _emFitMin = utp.get_option("meff_fit_min",model.tfit[-1]+1,**kwargs[key])
   ##    default = midpoint - sep
   _emFitMax = utp.get_option("meff_fit_max",
     model.tdata[-1]/2-_emSep-int(model.tdata[-1]/12),**kwargs[key])
   _emTData = model.tdata
   _emTFit = range(_emFitMin,_emFitMax)
   _emTFit = np.append(_emTFit,list(sorted([len(_emTData) - t for t in _emTFit])))
   _emTDataNonZero = [t for t in _emTData if np.abs(gv.mean(data[key])[t]) > 1e-20]
   #
   ## -- data
   _emTDataRatio =\
    [t for t in _emTData
    if (t in _emTDataNonZero) and (t <= _emTData[-1]/2 +1 - _emSep) ]   # first half
   _emTDataRatio = np.append(_emTDataRatio,
    [t for t in _emTData
    if (t in _emTDataNonZero) and (t >= _emTData[-1]/2 +1 + _emSep) ] ) # second half
   _emRatio =\
    [data[key][t+_emSep]/data[key][t] for t in _emTData
    if (t in _emTDataNonZero) and (t <= _emTData[-1]/2 +1 - _emSep) ]   # first half
   _emRatio = np.append(_emRatio,
    [data[key][t-_emSep]/data[key][t] for t in _emTData
    if (t in _emTDataNonZero) and (t >= _emTData[-1]/2 +1 + _emSep) ] ) # second half
   _emTPosRatio.append(
    [_emTDataRatio[t] for t in range(len(_emTDataRatio)) if _emRatio[t] > 0] )
   _emTPosFit.append(
    [_emTDataRatio[t] for t in range(len(_emTDataRatio))
    if _emRatio[t] > 0 and _emTDataRatio[t] in _emTFit] )
   _emLogRatio.append(
    [-gv.log(_emRatio[t])/_emSep for t in range(len(_emTDataRatio)) if _emRatio[t] > 0] )
   _emLogRatioFit.append(
    [-gv.log(_emRatio[t])/_emSep for t in range(len(_emTDataRatio))
    if (gv.mean(_emRatio[t]) > 0) and (_emTDataRatio[t] in _emTPosFit[-1])] )
   _emLogRatioCentral.append(gv.mean(_emLogRatio[-1]))
   _emLogRatioError.append([ list(gv.sdev(_emLogRatio[-1])), list(gv.sdev(_emLogRatio[-1])) ])
   ## -- fit
   _emRatioFit.append(lsq.wavg(_emLogRatioFit[-1]))
   _emFitCentral.append([gv.mean(_emRatioFit[-1]) for t in _emTPosFit[-1]])
   _emFitError.append(
    [list(np.array(_emFitCentral[-1])-np.array([gv.sdev(_emRatioFit[-1]) for t in _emTPosFit[-1]])),
     list(np.array(_emFitCentral[-1])+np.array([gv.sdev(_emRatioFit[-1]) for t in _emTPosFit[-1]]))]
    )
 print "Best plateau fits: "
 for key,rfit in zip([model.datatag for model in models],_emRatioFit):
  print "  ",key," : ",rfit
 print "   ----------------- "
 _emRatioFitNonZero = [x for x in _emRatioFit if not(x is None)]
 print "   avg  : ",lsq.wavg(_emRatioFitNonZero)

 ## -- done saving data
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_corr_effective_mass_check([ix])
 else:
  do_plot_corr_effective_mass_check(_emIdx)
def compute_diagonal((dset,key)):
 print "diagonal key ",key
 tdat = compute_correlation_pair(dset,key,key)
 return (key,gv.mean(tdat[key]),gv.sdev(tdat[key]),gv.evalcorr(tdat)[key,key])
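## -- illustration (not part of the original module): a hypothetical driver for
## -- compute_diagonal; the packed-tuple argument above matches the Python-2 calling
## -- convention used by map and multiprocessing.Pool.map
def _example_all_diagonals(dset, keys, nproc=None):
 """Return {key: (mean, sdev, corr)} for the diagonal of every correlator in keys."""
 args = [(dset, key) for key in keys]
 if nproc is None:
  results = map(compute_diagonal, args)
 else:
  import multiprocessing
  pool = multiprocessing.Pool(nproc)
  results = pool.map(compute_diagonal, args)
  pool.close()
  pool.join()
 return dict((key, (mean, sdev, corr)) for key, mean, sdev, corr in results)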
def plot_corr_adv_stacked_3pt(models,data,fit,req=None,**kwargs):
 """
 Get all data ready so that it can be plotted on command.
 Allows for dynamic cycling through plots.
 """
 _p3NMod = len(models)
 _p3NTsp = 0  ## -- number of source/sink separations
 _p3Tsp = []  ## -- list of source/sink separations used
 _p3Key = []  ## -- list of keys used, to search for different source/sink separations
 _p3Idx = [0] ## -- index of plotted function, in array so it can be modified in functions
 _p3ColorList = ['r','#1e90ff','g','b']
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _p3DatCentral = []
 _p3DatError   = []
 _p3FitCentral = []
 _p3FitError   = []
 #
 ## -- timeslice objects
 _p3TData = []
 _p3TFit  = []
 _p3TDataSub = []
 _p3TFitSub  = []
 fig,axp = plt.subplots(1,figsize=(10,8))
 if print_quality:
  plt.subplots_adjust(bottom=0.18,left=0.18,right=0.97,top=0.95)
 else:
  plt.subplots_adjust(bottom=0.15,left=0.15,right=0.97,top=0.95)
 #
 ## -- setup plot function
 def do_plot_3pt(idx,fig=fig):
   fig.clear()
   axp = fig.add_subplot(111)
   handles = list() ## -- handles for legend
   key = _p3Key[idx[0]]

   tbd = np.max([x[-1] for x in _p3TData[idx[0]]])
   axp.set_xlim([-float(tbd)/2-1,float(tbd)/2+1])
   #axp.set_ylim(utp.get_option("y_scale",[-2,2],**kwargs[key]))
   axp.set_ylim(utp.get_option("y_scale",[plotLimit[0],plotLimit[1]],**kwargs[key]))
   #
   ## -- plot fit
   if utp.get_option("p3_do_fit",True,**kwargs[key]):
    for i,tsub,fitcn in zip(range(len(_p3TFitSub[idx[0]])),
      _p3TFitSub[idx[0]],_p3FitCentral[idx[0]]):
     axp.plot(tsub,fitcn,color=_p3ColorList[i])
    for i,tsub,fiter in zip(range(len(_p3TFitSub[idx[0]])),_p3TFitSub[idx[0]],_p3FitError[idx[0]]):
     axp.plot(tsub,fiter[0],color=_p3ColorList[i],
      ls=utp.get_option("linestyle2",'--',**kwargs[key]))
     axp.plot(tsub,fiter[1],color=_p3ColorList[i],
      ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   for i,tsub,datcn,dater in zip(range(len(_p3TFitSub[idx[0]])),_p3TDataSub[idx[0]],
     _p3DatCentral[idx[0]],_p3DatError[idx[0]]):
    (_,caps,_) = axp.errorbar(tsub,datcn,dater,
     mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
     mec=_p3ColorList[i],
     color=_p3ColorList[i],
     #mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     #color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
     ls=utp.get_option("linestyle1",'None',**kwargs[key]),
     marker=utp.get_option("marker1",'o',**kwargs[key]),
     ms=utp.get_option("markersize",9,**kwargs[key]),
     ecolor=_p3ColorList[i],
     capsize=6,elinewidth=2)
    for cap in caps:
     cap.set_markeredgewidth(1)
   for i,tfit,tsub,datcn in zip(range(len(_p3TFit[idx[0]])),_p3TFit[idx[0]],
     _p3TFitSub[idx[0]],_p3DatCentral[idx[0]]):
    handle = axp.scatter(tsub,[datcn[t] for t in tfit],color=_p3ColorList[i],
     marker=utp.get_option("marker",'o',**kwargs[key]),
     s=utp.get_option("markersize",81,**kwargs[key]))
    handles.append(handle)
   fig.suptitle(utp.get_option("plottitlep3",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   if print_quality:
    axp.set_xlabel(r'$t-\frac{T}{2}$',fontsize=40)
    axp.set_ylabel(utp.get_option("yaxistitle",r"$C(t,T)$",**kwargs[key]),
     #fontsize=30,rotation=0,position=(0.05,0.98))
     fontsize=40,rotation='vertical')
    axp.yaxis.set_label_coords(-0.10,0.5)
    axp.tick_params(axis='both', which='major', labelsize=30)
    plt.yticks(list(np.arange(plotLimit[0],plotLimit[1]+1e-8,plotLimit[2])),
     fontsize=30)
   else:
    axp.set_xlabel(r'$t-\frac{T}{2}$',fontsize=30)
    axp.set_ylabel(utp.get_option("yaxistitle",r"$C(t,T)$",**kwargs[key]),
     #fontsize=30,rotation=0,position=(0.05,0.98))
     fontsize=30,rotation=0)
    #axp.yaxis.set_label_coords(-0.02,0.28)
    axp.yaxis.set_label_coords(0.0,1.03)
    axp.tick_params(axis='both', which='major', labelsize=24)
   ## -- modify some options 
   #for item in ([axp.xaxis.label,axp.yaxis.label]):
   # # must be after setting label content (LaTeX ruins it)
   # item.set_fontsize(fontsize=utp.get_option("fontsize",36,**kwargs[key]))
   plt.legend(handles,["T = "+str(t) for t in _p3Tsp],fontsize=30)
   rect =fig.patch
   rect.set_facecolor('white')
   
   if utp.get_option("to_file",False,**kwargs[key]):
    #print "key",key,"saved to file",save_dir+'/'+save_name
    save_dir  = utp.get_option("p3_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("p3_save_name","p3plot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass

 #
 ## -- setup button press action function
 def press_3pt(event,idx=_p3Idx):
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % len(_p3Key)
       do_plot_3pt(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % len(_p3Key)
       do_plot_3pt(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % len(_p3Key)
       do_plot_3pt(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,key in zip(range(len(_p3Key)),_p3Key):
         save_dir  = utp.get_option("p3_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("p3_save_name","p3plot-"+key+".pdf",**kwargs[key])
         do_plot_3pt([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_3pt(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_3pt)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   tsep = int(key.split('t')[-1]) # tsep
   tkey = 't'.join(key.split('t')[:-1]) # rest of tag
   if not(tsep in _p3Tsp):
    _p3Tsp.append(tsep)
    _p3NTsp += 1
   if not(tkey in _p3Key): ## -- append a list to everything for this new key
    _p3Key.append(tkey)
    _p3TFit.append(list())
    _p3TData.append(list())
    _p3TFitSub.append(list())
    _p3TDataSub.append(list())
    _p3FitCentral.append(list())
    _p3FitError.append(list())
    _p3DatCentral.append(list())
    _p3DatError.append(list())
   tidx = _p3Key.index(tkey)
   _p3TFit[tidx].append(list(model.tfit))
   _p3TData[tidx].append(model.tdata[:len(_p3TFit[tidx][-1])+2])
   _p3TFitSub[tidx].append([t-float(len(_p3TFit[tidx][-1])+1)/2 for t in _p3TFit[tidx][-1]])
   _p3TDataSub[tidx].append([t-float(len(_p3TFit[tidx][-1])+1)/2 for t in _p3TData[tidx][-1]])
   ## -- fit
   #if False:
   #_p3FitFunc = utp.create_fit_func_3pt(model,fit)
   if req is None:
    _p3FitFunc = utp.mask_fit_fcn(model,fit,invert=True)
   else:
    _p3FitFunc = utp.mask_fit_fcn(model,fit,req=req,invert=False)
   _p3FitMean = gv.mean(_p3FitFunc(np.array(_p3TFit[tidx][-1])))
   _p3FitSdev = gv.sdev(_p3FitFunc(np.array(_p3TFit[tidx][-1])))
   _p3FitCentral[tidx].append(_p3FitMean)
   _p3FitError[tidx].append([
     np.array(_p3FitMean)-np.array(_p3FitSdev),
     np.array(_p3FitMean)+np.array(_p3FitSdev)])
   ## -- data
   if req is None:
    _p3SubFunc = utp.mask_fit_fcn(model,fit,invert=False)
   else:
    _p3SubFunc = utp.mask_fit_fcn(model,fit,req=req,invert=True)
   _dfSub = _p3SubFunc(np.array(_p3TData[tidx][-1]))
   #print _p3TData[tidx][-1]
   #print _p3TFit[tidx][-1]
   #print _dfSub,data[key]
   #_p3DatMean = gv.mean([data[key][t] for t in _p3TData[tidx][-1]])
   #_p3DatSdev = gv.sdev([data[key][t] for t in _p3TData[tidx][-1]])
   _p3DatMean = gv.mean(np.array([data[key][t] for t in _p3TData[tidx][-1]])-_dfSub)
   _p3DatSdev = gv.sdev(np.array([data[key][t] for t in _p3TData[tidx][-1]])-_dfSub)
   _p3DatCentral[tidx].append( _p3DatMean )
   _p3DatError[tidx].append([list(_p3DatSdev),list(_p3DatSdev)])
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(_p3Key)):
    ## -- loops and saves all without creating window
    do_plot_3pt([ix])
 else:
  do_plot_3pt(_p3Idx)
Example #55
0
def main():
    gd.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(2,8):
        if nexp == 2:
            sys_stdout = sys.stdout
            sys.stdout = tee.tee(sys_stdout, open("eg4GBF.out","w"))
        print '************************************* nexp =',nexp
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0)
        print fit                   # print the fit results
        E = fit.p['E']              # best-fit parameters (needed below in the error budget)
        a = fit.p['a']
        # print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        # print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print
        if nexp == 3:
            sys.stdout = sys_stdout
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
    if DO_ERRORBUDGET:
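        # error budget: partialsdev(x) picks out the part of the standard
        # deviation of E[1]/E[0] that comes from the inputs collected in x
        # (the priors for E and a, and the data y)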
        print E[1]/E[0]
        print (E[1]/E[0]).partialsdev(fit.prior['E'])
        print (E[1]/E[0]).partialsdev(fit.prior['a'])
        print (E[1]/E[0]).partialsdev(y)
        outputs = {'E1/E0':E[1]/E[0], 'E2/E0':E[2]/E[0],         
                 'a1/a0':a[1]/a[0], 'a2/a0':a[2]/a[0]}
        inputs = {'E':fit.prior['E'],'a':fit.prior['a'],'y':y}
        
        sys.stdout = tee.tee(sys_stdout, open("eg4GBFb.out","w"))
        print fit.fmt_values(outputs)
        print fit.fmt_errorbudget(outputs,inputs)
        sys.stdout = sys_stdout
        
    if DO_EMPBAYES:
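        # empirical Bayes: fitargs maps the hyperparameter z onto a prior width
        # for 'a'; empbayes_fit then adjusts z to maximize the fit's logGBF
        # (log Gaussian Bayes Factor)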
        def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0):
            z = gd.exp(z)
            prior['a'] = [gd.gvar(0.5,0.5*z[0]) for i in range(nexp)]
            return dict(prior=prior,data=data,fcn=f,p0=p0)
        ##
        z0 = [0.0]
        fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4GBFa.out","w"))
        print fit                   # print the optimized fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print "prior['a'] =",fit.prior['a'][0]
        sys.stdout = sys_stdout
        print
    
    if DO_PLOT:
        import pylab as pp   
        from gvar import mean,sdev     
        fity = f(x,fit.pmean)
        ratio = y/fity
        pp.xlim(0,21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob')
        pp.plot([0.0,21.0],[1.0,1.0])
        pp.show()
Example #56
0
def wavg(dataseq, prior=None, fast=False, **kargs):
    """ Weighted average of |GVar|\s or arrays/dicts of |GVar|\s.
        
    The weighted average of several |GVar|\s is what one obtains from
    a  least-squares fit of the collection of |GVar|\s to the
    one-parameter fit function ::

        def f(p): 
            return N * [p[0]]

    where ``N`` is the number of |GVar|\s. The average is the best-fit 
    value for ``p[0]``.  |GVar|\s with smaller standard deviations carry 
    more weight than those with larger standard deviations. The averages
    computed by ``wavg`` take account of correlations between the |GVar|\s.

    If ``prior`` is not ``None``, it is added to the list of data 
    used in the average. Thus ``wavg([x2, x3], prior=x1)`` is the 
    same as ``wavg([x1, x2, x3])``. 
        
    Typical usage is ::
        
        x1 = gvar.gvar(...)
        x2 = gvar.gvar(...)
        x3 = gvar.gvar(...)
        xavg = wavg([x1, x2, x3])   # weighted average of x1, x2 and x3
    
    where the result ``xavg`` is a |GVar| containing the weighted average.

    The individual |GVar|\s in the last example can be  replaced by
    multidimensional distributions, represented by arrays of |GVar|\s
    or dictionaries of |GVar|\s (or arrays of |GVar|\s). For example, ::

        x1 = [gvar.gvar(...), gvar.gvar(...)]
        x2 = [gvar.gvar(...), gvar.gvar(...)]
        x3 = [gvar.gvar(...), gvar.gvar(...)]
        xavg = wavg([x1, x2, x3])   
            # xavg[i] is wgtd avg of x1[i], x2[i], x3[i]

    where each array ``x1``, ``x2`` ... must have the same shape. 
    The result ``xavg`` in this case is an array of |GVar|\s, where 
    the shape of the array is the same as that of ``x1``, etc.

    Another example is ::

        x1 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x2 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x3 = dict(a=[gvar.gvar(...), gvar.gvar(...)])
        xavg = wavg([x1, x2, x3])   
            # xavg['a'][i] is wgtd avg of x1['a'][i], x2['a'][i], x3['a'][i]
            # xavg['b'] is wgtd avg of x1['b'], x2['b']

    where different dictionaries can have (some) different keys. Here the
    result ``xavg`` is a :class:`gvar.BufferDict` having the same keys as
    ``x1``, etc.
     
    Weighted averages can become costly when the number of random samples being 
    averaged is large (100s or more). In such cases it might be useful to set
    parameter ``fast=True``. This causes ``wavg`` to estimate the weighted 
    average by incorporating the random samples one at a time into a 
    running average::

        result = prior
        for dataseq_i in dataseq:
            result = wavg([result, dataseq_i], ...)

    This method is much faster when ``len(dataseq)`` is large, and gives the
    exact result when there are no correlations between different elements
    of list ``dataseq``. The results are approximately correct when 
    ``dataseq[i]`` and ``dataseq[j]`` are correlated for ``i!=j``.

    :param dataseq: The |GVar|\s to be averaged. ``dataseq`` is a one-dimensional
        sequence of |GVar|\s, or of arrays of |GVar|\s, or of dictionaries 
        containing |GVar|\s or arrays of |GVar|\s. All ``dataseq[i]`` must
        have the same shape.
    :param prior: Prior values for the averages, to be included in the weighted
        average. Default value is ``None``, in which case ``prior`` is ignored.
    :type prior: |GVar| or array/dictionary of |GVar|\s
    :param fast: Setting ``fast=True`` causes ``wavg`` to compute an 
        approximation to the weighted average that is much faster to calculate 
        when averaging a large number of samples (100s or more). The default is 
        ``fast=False``.
    :type fast: bool 
    :param kargs: Additional arguments (e.g., ``svdcut``) to the fitter 
        used to do the averaging.
    :type kargs: dict
        
    Results returned by :func:`gvar.wavg` have the following extra 
    attributes describing the average:
        
    .. attribute:: chi2
        
        ``chi**2`` for weighted average.
        
    .. attribute:: dof
        
        Effective number of degrees of freedom.
        
    .. attribute:: Q
        
        Quality factor `Q` (or *p-value*) for the average: the probability
        that the ``chi**2`` could have been larger, by chance, assuming that
        the data are all Gaussian and consistent with each other. Values
        smaller than 0.1 or so suggest that the data are not Gaussian or are
        inconsistent with each other.

    .. attribute:: time

        Time required to do average.

    .. attribute:: svdcorrection

        The *svd* corrections made to the data when ``svdcut`` is not ``None``.

    .. attribute:: fit

        Fit output from average.
    """
    if len(dataseq) <= 0:
        if prior is None:
            return None 
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(prior, 'keys'):
            return BufferDictWAvg(prior, wavg)  # dataseq is empty here; the average is just the prior
        if numpy.shape(prior) == ():
            return GVarWAvg(prior, wavg)
        else:
            return ArrayWAvg(numpy.asarray(prior), wavg)        
    elif len(dataseq) == 1 and prior is None:
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(dataseq[0], 'keys'):
            return BufferDictWAvg(dataseq[0], wavg)
        if numpy.shape(dataseq[0]) == ():
            return GVarWAvg(dataseq[0], wavg)
        else:
            return ArrayWAvg(numpy.asarray(dataseq[0]), wavg)
    if fast:
        chi2 = 0
        dof = 0
        time = 0
        ans = prior
        svdcorrection = gvar.BufferDict()
        for i, dataseq_i in enumerate(dataseq):
            if ans is None:
                ans = dataseq_i
            else:
                ans = wavg([ans, dataseq_i], fast=False, **kargs)
                chi2 += wavg.chi2
                dof += wavg.dof
                time += wavg.time
                if wavg.svdcorrection is not None:
                    for k in wavg.svdcorrection:
                        svdcorrection[str(i) + ':' + k] = wavg.svdcorrection[k]
        wavg.chi2 = chi2
        wavg.dof = dof
        wavg.time = time
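        # p-value for the accumulated chi**2 via the regularized incomplete gamma function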
        wavg.Q = gammaQ(dof / 2., chi2 / 2.)
        wavg.svdcorrection = svdcorrection
        wavg.fit = None
        ans.dof = wavg.dof
        ans.Q = wavg.Q
        ans.chi2 = wavg.chi2
        ans.time = wavg.time
        ans.svdcorrection = wavg.svdcorrection
        ans.fit = wavg.fit
        return ans
    if hasattr(dataseq[0], 'keys'):
        data = {}
        keys = []
        if prior is not None:
            dataseq = [prior] + list(dataseq)
        for dataseq_i in dataseq:
            for k in dataseq_i:
                if k in data:
                    data[k].append(dataseq_i[k])
                else:
                    data[k] = [dataseq_i[k]]
                    keys.append(k)
        data = gvar.BufferDict(data, keys=keys)
        p0 = gvar.BufferDict()
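        # starting point: first datum's mean, nudged by a tenth of its standard
        # deviation (presumably so the fit does not start exactly on the data value)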
        for k in data:
            p0[k] = gvar.mean(data[k][0]) + gvar.sdev(data[k][0]) / 10.
        def fcn(p):
            ans = gvar.BufferDict()
            for k in data:
                ans[k] = len(data[k]) * [p[k]]
            return ans
    else:
        p = numpy.asarray(dataseq[0])
        data = [] if prior is None else [prior]
        data += [dataseqi for dataseqi in dataseq]
        p0 = numpy.asarray(gvar.mean(data[0]) + gvar.sdev(data[0]) / 10.)
        data = numpy.array(data)
        def fcn(p):
            return len(data) * [p]
    fit = lsqfit.nonlinear_fit(data=data, fcn=fcn, p0=p0, **kargs)
    # wavg.Q = fit.Q
    # wavg.chi2 = fit.chi2
    # wavg.dof = fit.dof
    # wavg.time = fit.time
    # wavg.svdcorrection = fit.svdcorrection
    # wavg.fit = fit
    if p0.shape is None:
        return BufferDictWAvg(gvar.BufferDict(p0, buf=fit.p.flat), fit)
    elif p0.shape == ():
        return GVarWAvg(fit.p.flat[0], fit)
    else:
        return ArrayWAvg(fit.p.reshape(p0.shape), fit)
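# -- hedged usage sketch (editorial addition): a minimal example of calling
# -- wavg() on scalar GVars and reading back the fit-quality attributes the
# -- docstring describes.  It assumes gvar is imported as `gvar` (as elsewhere
# -- in this module) and that the result exposes .chi2, .dof and .Q as documented.
def _wavg_usage_sketch():
    x1 = gvar.gvar(1.02, 0.10)
    x2 = gvar.gvar(0.98, 0.08)
    x3 = gvar.gvar(1.05, 0.12)
    xavg = wavg([x1, x2, x3])            # weighted average of three GVars
    print xavg                           # GVar with the combined (smaller) error
    print xavg.chi2, xavg.dof, xavg.Q    # consistency of the three inputs
    return xavg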
Example #57
0
def plot_corr_3pt(models,data,fit,**kwargs):
 """
 Get all data ready so that it can be plotted on command
 Allows for dynamic cycling through plots
 """
 _p3NMod = len(models)
 _p3Idx = [0] ## -- index of plotted function, in array so it can be modified in functions
 ## -- objects to hold all plot data
 ##  - Dat/Fit refers to the correlator data or the fit function
 ##  - Central/Error are the central value and errors
 _p3DatCentral = []
 _p3DatError   = []
 _p3FitCentral = []
 _p3FitError   = []
 #
 ## -- other objects
 _p3TData = []
 _p3TFit  = []
 fig,axp = plt.subplots(1,figsize=(8,8))
 #
 ## -- setup plot function
 def do_plot_3pt(idx,fig=fig):
   fig.clear()
   axp = fig.add_subplot(111)
   #fig.subplots_adjust(hspace=0)
   key = models[idx[0]].datatag

   #axp.set_yscale('log')
   #axp.set_xlim([-1,len(_p3TData[idx[0]])])
   axp.set_xlim([-.5,len(_p3TFit[idx[0]])+1.5])
   axp.set_ylim(utp.get_option("y_scale",[-2,2],**kwargs[key]))
   #plt.sca(axp)
   #expp = [int(np.floor(np.log10(np.abs(x)))) for x in plt.yticks()[0][2:]]
   #expp = ['$10^{'+str(x)+'}$' for x in expp]
   #plt.yticks(plt.yticks()[0][2:],expp)
   #
   ## -- plot fit
   axp.plot(_p3TData[idx[0]],_p3FitCentral[idx[0]],
    color=utp.get_option("color2",'b',**kwargs[key]))
   axp.plot(_p3TData[idx[0]],_p3FitError[idx[0]][0],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   axp.plot(_p3TData[idx[0]],_p3FitError[idx[0]][1],
    color=utp.get_option("color2",'g',**kwargs[key]),
    ls=utp.get_option("linestyle2",'--',**kwargs[key]))
   ## -- plot correlator data
   axp.errorbar(_p3TData[idx[0]],_p3DatCentral[idx[0]],yerr=_p3DatError[idx[0]],
    mfc=utp.get_option("markerfacecolor1",'None',**kwargs[key]),
    mec=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    color=utp.get_option("markeredgecolor1",'k',**kwargs[key]),
    ls=utp.get_option("linestyle1",'None',**kwargs[key]),
    marker=utp.get_option("marker1",'o',**kwargs[key]),
    ms=utp.get_option("markersize",6,**kwargs[key]))
   axp.scatter(_p3TFit[idx[0]],[_p3DatCentral[idx[0]][t] for t in _p3TFit[idx[0]]],
    color=utp.get_option("color1",'r',**kwargs[key]),
    marker=utp.get_option("marker",'o',**kwargs[key]),
    s=utp.get_option("markersize",36,**kwargs[key]))
   fig.suptitle(utp.get_option("plottitlep3",str(idx[0])+" default title "+str(key),**kwargs[key]),
    fontsize=utp.get_option("titlesize",20,**kwargs[key]))
   ## -- modify some options 
   for item in ([axp.xaxis.label,axp.yaxis.label]):
    # must be after setting label content (LaTeX ruins it)
    item.set_fontsize(fontsize=utp.get_option("fontsize",20,**kwargs[key]))
   rect = fig.patch
   rect.set_facecolor('white')
   if utp.get_option("to_file",False,**kwargs[key]):
    #print "key",key,"saved to file",save_dir+'/'+save_name
    save_dir  = utp.get_option("p3_save_dir","./plotdump",**kwargs[key])
    save_name = utp.get_option("p3_save_name","p3plot-"+key+".pdf",**kwargs[key])
    plt.savefig(save_dir+'/'+save_name)
   if utp.get_option("to_terminal",True,**kwargs[key]):
    plt.draw()
   pass
 #
 ## -- setup button press action function
 def press_3pt(event,idx=_p3Idx):
   try:
     ## -- manually indicate index
     idx[0] = int(event.key) + (idx[0])*10
   except ValueError:
     if event.key==' ': ## -- space
       ## -- allows for replotting when changing index by typing number keys
       idx[0] = idx[0] % _p3NMod
       do_plot_3pt(idx)
     elif event.key=='left':
       idx[0] = (idx[0] - 1) % _p3NMod
       do_plot_3pt(idx)
     elif event.key=='right':
       idx[0] = (idx[0] + 1) % _p3NMod
       do_plot_3pt(idx)
     elif event.key=='backspace':
       ## -- reset index so can manually flip through using number keys
       idx[0] = 0
     elif event.key=='d':
       ## -- dump plots into ./plotdump directory
       for ix,model in zip(range(len(models)),models):
         key = model.datatag
         save_dir  = utp.get_option("p3_save_dir","./plotdump",**kwargs[key])
         save_name = utp.get_option("p3_save_name","p3plot-"+key+".pdf",**kwargs[key])
         do_plot_3pt([ix])
         plt.savefig(save_dir+'/'+save_name)
       do_plot_3pt(idx)
 #
 ## -- 
 fig.canvas.mpl_connect('key_press_event',press_3pt)
 ## -- save plot data
 for idx,model in zip(range(len(models)),models):
   key = model.datatag
   _p3TData.append(model.tdata)
   _p3TFit.append(model.tfit)
   #_p3TFit[-1] = np.append(_p3TFit[-1],list(sorted([len(_p3TData[-1]) - t for t in _p3TFit[-1]])))
   ## -- fit
   _p3FitFunc = utp.create_fit_func_3pt(model,fit) ## not defined yet!
   _p3FitMean = gv.mean(_p3FitFunc(_p3TData[-1]))
   _p3FitSdev = gv.sdev(_p3FitFunc(_p3TData[-1]))
   _p3FitCentral.append(_p3FitMean)
   _p3FitError.append([
     np.array(_p3FitMean)-np.array(_p3FitSdev),
     np.array(_p3FitMean)+np.array(_p3FitSdev)])
   ## -- data
   _p3DatMean = gv.mean(data[key])
   _p3DatSdev = gv.sdev(data[key])
   _p3DatCentral.append( _p3DatMean )
   _p3DatError.append([list(_p3DatSdev),list(_p3DatSdev)])
 ## -- done saving data
 
 if not(utp.get_option("to_terminal",True,**kwargs[key])) and\
    utp.get_option("to_file",False,**kwargs[key]):
  for ix in range(len(models)):
    ## -- loops and saves all without creating window
    do_plot_3pt([ix])
 else:
  do_plot_3pt(_p3Idx)
Example #58
0
def main():
    gv.ranseed([2009, 2010, 2011, 2012])  # initialize random numbers (opt.)
    x, y = make_data()  # make fit data
    p0 = None  # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 8):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=1e-15)  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.0:
            p0 = fit.pmean  # starting point for next fit (opt.)
        if nexp == 5:
            sys.stdout = tee.tee(sys_stdout, open("eg3.out", "w"))
        print "************************************* nexp =", nexp
        print fit  # print the fit results
        E = fit.p["E"]  # best-fit parameters
        a = fit.p["a"]
        print "E1/E0 =", E[1] / E[0], "  E2/E0 =", E[2] / E[0]
        print "a1/a0 =", a[1] / a[0], "  a2/a0 =", a[2] / a[0]
        # print E[1]-E[0], E[-1]-E[-2]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(fit.y)
        sys.stdout = sys_stdout
        print
    # sys.stdout = tee.tee(sys_stdout, open("eg3a.out", "w"))
    # for i in range(1):
    #     print '--------------------- fit with %d extra data sets' % (i+1)
    #     x, y = make_data(1)
    #     prior = fit.p
    #     fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f1,prior=prior, svdcut=SVDCUT)
    #     print fit
    sys.stdout = sys_stdout

    if DO_BOOTSTRAP:
        Nbs = 10  # number of bootstrap copies
        outputs = {"E1/E0": [], "E2/E0": [], "a1/a0": [], "a2/a0": [], "E1": [], "a1": []}  # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean["E"]  # best-fit parameters
            a = bsfit.pmean["a"]
            outputs["E1/E0"].append(E[1] / E[0])  # accumulate results
            outputs["E2/E0"].append(E[2] / E[0])
            outputs["a1/a0"].append(a[1] / a[0])
            outputs["a2/a0"].append(a[2] / a[0])
            outputs["E1"].append(E[1])
            outputs["a1"].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print "Bootstrap results:"
        print "E1/E0 =", outputs["E1/E0"], "  E2/E1 =", outputs["E2/E0"]
        print "a1/a0 =", outputs["a1/a0"], "  a2/a0 =", outputs["a2/a0"]
        print "E1 =", outputs["E1"], "  a1 =", outputs["a1"]

    if DO_PLOT:
        print fit.format(100)  # print the fit results
        import pylab as pp
        from gvar import mean, sdev

        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel("x")
        pp.ylabel("y/f(x,p)")
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt="ob")
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
Example #59
0
def main():
    gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.)
    x,y = make_data()               # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out","w"))
    for nexp in range(3,20):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0) #, svdcut=SVDCUT)
        if fit.chi2/fit.dof<1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        if nexp in [8, 9, 10]:
            print(".".center(73))
        if nexp > 7 and nexp < 19:
            continue
        elif nexp not in [3]:
            print("")
        print '************************************* nexp =',nexp
        print fit.format()                   # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]

    # extra data 1
    print '\n--------------------- fit with extra information'
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    def ratio(p):
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1,1e-5), fcn=ratio, prior=fit.p)
    print (newfit)
    # print(newfit.p['a'][1] / newfit.p['a'][0])
    # print(fit.p['a'][1] / fit.p['a'][0])

    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0' : gv.gvar(1,1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
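    # new_p is the weighted average of the old best-fit parameters with the
    # tightly constrained ratio a1/a0 = 1.0(1e-5); correlations in fit.p carry
    # the constraint through to the E's and a's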
    print 'chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof)
    print 'E:', new_p['E'][:4]
    print 'a:', new_p['a'][:4]
    print 'a1/a0:', new_p['a1/a0']

    # # extra data 2
    # sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    # newfit = fit
    # for i in range(1):
    #     print '\n--------------------- fit with %d extra data sets' % (i+1)
    #     x, ynew = make_data()
    #     prior = newfit.p
    #     newfit = lsqfit.nonlinear_fit(data=(x,ynew), fcn=f, prior=prior) # , svdcut=SVDCUT)
    #     print newfit
    sys.stdout = sys_stdout
    # def fcn(x, p):
    #     return f(x, p), f(x, p)
    # prior = make_prior(nexp)
    # fit = lsqfit.nonlinear_fit(data=(x, [y, ynew]), fcn=fcn, prior=prior, p0=newfit.pmean) # , svdcut=SVDCUT)
    # print(fit)


    if DO_BOOTSTRAP:
        Nbs = 40                                     # number of bootstrap copies

        outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                     # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1]/E[0])       # accumulate results
            outputs['E2/E0'].append(E[2]/E[0])
            outputs['a1/a0'].append(a[1]/a[0])
            outputs['a2/a0'].append(a[2]/a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]),np.std(outputs[k]))
        print 'Bootstrap results:'
        print 'E1/E0 =',outputs['E1/E0'],'  E2/E1 =',outputs['E2/E0']
        print 'a1/a0 =',outputs['a1/a0'],'  a2/a0 =',outputs['a2/a0']
        print 'E1 =',outputs['E1'],'  a1 =',outputs['a1']
        
    if DO_PLOT:
        import pylab as plt   
        ratio = y/fit.fcn(x,fit.pmean)
        plt.xlim(0,21)
        plt.xlabel('x')
        plt.ylabel('y/f(x,p)')
        plt.errorbar(x=x,y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob')
        plt.plot([0.0,21.0],[1.0,1.0])
        plt.show()