Example #1
def w0(self):
    """Lazily loads the scale-setting data, keeping the lattice spacing,
    ensemble description, and w0_orig/a columns."""
    if self._w0 is None:
        df = data_tables.ScaleSetting().data
        df = df[['a[fm]', 'description', 'w0_orig/a']]
        df = df.rename(columns={'a[fm]': 'a_fm'})
        self._w0 = df
    return self._w0
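This is the body of a lazily cached property: the scale-setting table is built on first access and memoized on the instance. A self-contained sketch of the same idiom (class name and placeholder values are hypothetical):

import pandas as pd

class ScaleData:
    """Illustrates the lazy-caching idiom used by w0 above."""

    def __init__(self):
        self._w0 = None  # cache starts empty

    @property
    def w0(self):
        if self._w0 is None:
            # The expensive load runs once; later accesses hit the cache
            df = pd.DataFrame({'a[fm]': [0.15], 'description': ['1/27'], 'w0_orig/a': [1.13]})
            self._w0 = df.rename(columns={'a[fm]': 'a_fm'})
        return self._w0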
Example #2
    def x(self):
        """
        Gets input arguments "x_ctm" for use in the continuum limit
        at the physical point.
        """
        scale = data_tables.ScaleSetting()
        ctm = data_tables.ContinuumConstants()

        # Quark masses
        ml_ctm_MeV = 3.402  # Eq (5.3) of 1802.04248
        ms_ctm_MeV = 92.47  # Eq (5.2) of 1802.04248
        mc_ctm_MeV = 1090   # Eq (5.9) of 1802.04248
        ml_ctm = ml_ctm_MeV * scale.w0_fm / ctm.hbarc
        ms_ctm = ms_ctm_MeV * scale.w0_fm / ctm.hbarc
        mc_ctm = mc_ctm_MeV * scale.w0_fm / ctm.hbarc

        # Hadron masses
        mpi5 = ctm.pdg['pi'] * scale.w0_fm / ctm.hbarc
        mK5 = ctm.pdg['K'] * scale.w0_fm / ctm.hbarc
        mS5 = np.nan  # not set here; see the commented-out GMOR estimate below
        mother = self.mother * scale.w0_fm / ctm.hbarc
        daughter = self.daughter * scale.w0_fm / ctm.hbarc

        # The physical low-energy constant "mu"
        # Infer the LEC mu from the Gell-Mann-Oakes-Renner relation: Mpi**2 = 2*mu*ml.
        # Note: no special treatment is required for daughter kaons here,
        # since the LEC "mu" is related, up to conventional numerical
        # constants, to the quark condensate.
        # Numerically, one finds
        # 2.326(21) -- from Mpi**2 / (2*ml)
        # 2.243(21) -- from MK**2 / (ml + ms)
        mu_ctm = ctm.pdg['pi']**2 / (2*ml_ctm_MeV) * scale.w0_fm / ctm.hbarc
        energy_min, energy_max = self.get_energy_bounds()
        f = ctm.pdg['fpi'] * scale.w0_fm / ctm.hbarc
        # f = ctm.pdg['fK'] * scale.w0_fm / ctm.hbarc
        # Note: dm_heavy represents the "mistuning" dm = (m - m0) of the
        # heavy quark. By definition, this difference vanishes at the
        # physical point.
        return {
            'fpi': gv.mean(f),
            'm_light': gv.mean(ml_ctm),
            'm_strange': gv.mean(ms_ctm),
            'm_heavy': gv.mean(mc_ctm),
            'dm_heavy': 0,
            'alpha_s': 0,
            'E': np.linspace(gv.mean(energy_min), gv.mean(energy_max)),
            'mpi5': gv.mean(mpi5),
            'mK5': gv.mean(mK5),
            'mS5': gv.mean(mS5),
            # 'mpi5': gv.mean(self.daughter * scale.w0_fm / ctm.hbarc),
            # 'mK5': mu_ctm * (ml_ctm + ms_ctm),
            # 'mS5': mu_ctm * (2.0 * ms_ctm),
            'mu': gv.mean(mu_ctm),
            'DeltaBar': 0,
            'M_mother': gv.mean(mother),
            'M_daughter': gv.mean(daughter),
        }
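The two numbers quoted in the comment above can be reproduced by hand. A quick check of the GMOR estimates, assuming w0 ≈ 0.1715 fm and hbarc = 197.327 MeV fm (the code takes both from data_tables; the hadron masses are approximate central values):

w0_fm, hbarc = 0.1715, 197.327  # assumed scale setting; data_tables supplies the real values
mpi_MeV, mK_MeV = 135.0, 495.7  # approximate pion and kaon masses in MeV
ml_MeV, ms_MeV = 3.402, 92.47   # quark masses from Eqs (5.3) and (5.2) of 1802.04248

mu_from_pi = mpi_MeV**2 / (2 * ml_MeV) * w0_fm / hbarc     # ~2.33, cf. 2.326(21)
mu_from_K = mK_MeV**2 / (ml_MeV + ms_MeV) * w0_fm / hbarc  # ~2.23, cf. 2.243(21)
print(mu_from_pi, mu_from_K)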
Example #3
def build_base_prior(self):
    """Builds the base prior, using a log-normal prior to keep the pole position positive."""
    scale = data_tables.ScaleSetting()
    ctm = data_tables.ContinuumConstants()
    # Pole position, converted to dimensionless w0 units
    delta_pole = ctm.get_delta_pole(self.process, fractional_width=1.0) * scale.w0_fm / ctm.hbarc
    prior = {
        'leading': gv.gvar(10, 10),
        'log(delta_pole)': np.log(delta_pole),
    }
    if self.model_name != 'LogLess':
        prior['g'] = gv.gvar(10, 10)
    return prior
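The 'log(delta_pole)' key follows lsqfit's log-normal prior convention: the fit works with the Gaussian-distributed log(delta_pole), which constrains the pole position itself to be strictly positive. A small sketch, with an arbitrary placeholder value:

import gvar as gv

delta_pole = gv.gvar('0.10(10)')  # hypothetical pole position in w0 units
prior = {'log(delta_pole)': gv.log(delta_pole)}
# lsqfit puts a Gaussian prior on log(delta_pole), so the exponentiated
# parameter delta_pole stays positive during the fit.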
Example #4
def build_fit_data(dataframe):
    """
    Builds dictionaries suitable for interpretation as input data
    for chiral-continuum fits with lsqfit, "data=(x,y)".
    Args:
        dataframe: pd.DataFrame containing correlated data
    Returns:
        xdict, ydict: the data dictionaries for the fit
    """
    keys = ['a_fm', 'description', 'm_light', 'm_strange', 'm_heavy', 'dm_heavy']
    groups = dataframe.groupby(keys)
    xdict, ydict = {}, {}
    for (a_fm, description, m_light, m_strange, m_heavy, dm_heavy), subdf in groups:
        subdf = subdf.sort_values(by='E_daughter')
        y = subdf['form_factor'].values
        M_daughter = subdf['M_daughter'].apply(gv.mean).unique().item()
        p2 = subdf['p2'].values
        x = InputData(a_fm, description, m_light, m_strange, m_heavy, dm_heavy, M_daughter, p2).asdict()

        # Include continuum constants like fpi as independent "x-parameters"
        scale = data_tables.ScaleSetting()
        ctm = data_tables.ContinuumConstants()
        fpi = ctm.pdg['fpi'] * scale.w0_fm / ctm.hbarc
        x['fpi'] = gv.mean(fpi)

        # Include staggered low-energy constants as independent "x-parameters"
        const = data_tables.StaggeredConstants().get_row(a_fm=a_fm)
        w0 = gv.mean(scale.get_row(a_fm=a_fm, description=description)['w0_orig/a'])

        # Quantities with mass dimension zero
        x['alpha_s'] = const['alpha_s']

        # Quantities with mass dimension +1
        x['mu'] = const['mu'] * w0

        # Quantities with mass dimension +2
        for k in ['Delta_P', 'Delta_A', 'Delta_T', 'Delta_V', 'Delta_I',
                    'DeltaBar', 'Hairpin_V', 'Hairpin_A']:
            x[k] = gv.mean(const[k]) * w0**2

        # Hadron masses
        x['M_daughter'] = M_daughter * w0  # reuse the value extracted above
        x['M_mother'] = subdf['M_mother'].apply(gv.mean).unique().item() * w0
        x['mpi5'] = subdf['pion'].apply(gv.mean).unique().item() * w0
        x['mK5'] = subdf['kaon'].apply(gv.mean).unique().item() * w0
        x['mS5'] = np.sqrt(const['mu'] * (2 * m_strange)) * w0  # mS5**2 = 2*mu*m_strange; each mass carries one power of w0

        # Collect results
        key = FitKey(a_fm, description, m_light, m_strange, m_heavy)
        ydict[key] = y
        xdict[key] = x

    return xdict, ydict
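Since xdict and ydict share the same FitKey keys, a fit function that maps the x-dictionary to a y-dictionary entry by entry can be handed to lsqfit directly. A minimal sketch of the intended usage, with a toy one-parameter model standing in for the real chiral-continuum expansion:

import gvar as gv
import lsqfit

def fcn(x, p):
    # Toy model: a constant times the daughter energy on each ensemble;
    # the real fit function evaluates the chiral-continuum expansion
    return {key: p['c0'] * xvalue['E'] for key, xvalue in x.items()}

prior = {'c0': gv.gvar(1, 10)}
xdict, ydict = build_fit_data(dataframe)  # 'dataframe' as in the docstring above
fit = lsqfit.nonlinear_fit(data=(xdict, ydict), fcn=fcn, prior=prior)
print(fit.format())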
Example #5
def get_energy_bounds(self, MeV=False):
    """
    Gets the minimum and maximum energies of the daughter
    hadron for a physical process. The minimum energy corresponds
    to the daughter hadron at rest (q2 = q2_max). The maximum
    energy corresponds to the daughter hadron moving with
    zero energy transferred to the leptonic system (q2 = 0).
    """
    scale = data_tables.ScaleSetting()
    ctm = data_tables.ContinuumConstants()
    energy_min = self.daughter
    energy_max = (self.mother**2 + self.daughter**2) / (2 * self.mother)
    if not MeV:
        # Convert from MeV to dimensionless w0 units
        energy_min = energy_min * scale.w0_fm / ctm.hbarc
        energy_max = energy_max * scale.w0_fm / ctm.hbarc
    return (energy_min, energy_max)
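As a concrete check of these formulas, consider a Ds to K transition with approximate PDG masses (values in MeV; a sketch of the MeV=True branch):

M_mother = 1968.3   # approximate Ds mass in MeV
M_daughter = 497.6  # approximate K0 mass in MeV
energy_min = M_daughter  # daughter at rest, q2 = q2_max
energy_max = (M_mother**2 + M_daughter**2) / (2 * M_mother)  # q2 = 0
print(energy_min, energy_max)  # ~497.6 and ~1047 MeV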
Example #6
def __init__(self, a_fm, description, m_light, m_strange, m_heavy, dm_heavy, M_daughter, p2):
    """
    Args:
        a_fm: float, approximate lattice spacing in fm (e.g., 0.15). Used to look up the exact scale
        description: str, the ratio ml/ms (e.g., '1/27'). Used to look up the exact scale
        m_light: float, bare mass of the light (u/d) quarks
        m_strange: float, bare mass of the strange quark
        m_heavy: float, bare mass of the heavy quark
        dm_heavy: float, "mistuning" of the bare heavy quark mass, dm = (m - m_physical)
        M_daughter: float or gvar, the mass of the daughter hadron
        p2: np.array, the squared lattice momenta of the daughter hadron
    """
    scale = data_tables.ScaleSetting().data
    mask = (scale['a[fm]'] == a_fm) & (scale['description'] == description)
    w0 = gv.mean(scale[mask]['w0_orig/a'].item())
    # Convert to dimensionless units of w0
    self.m_light = m_light * w0
    self.m_strange = m_strange * w0
    self.m_heavy = m_heavy * w0
    self.dm_heavy = dm_heavy * w0
    self.E = np.sqrt(M_daughter**2 + p2) * w0
Example #7
def run_fits(process, channel, engine):
    """Runs the chiral-continuum fits for each model, dataset mask, and prior
    variation, returning the raw data and a summary DataFrame of results."""
    data = FormFactorData(process, engine)[channel]
    scale = data_tables.ScaleSetting()
    ctm = data_tables.ContinuumConstants()
    lam = gv.mean(700 * scale.w0_fm / ctm.hbarc)

    # The same model set is currently used for every process; the
    # commented entries are variants that have been switched off.
    models = {
        # 'SU2': su2.SU2Model,
        'HardSU2': su2.HardSU2Model,
        # 'SU2:continuum': su2.SU2Model,
        'HardSU2:continuum': su2.HardSU2Model,
        'LogLess': chipt.LogLessModel,
    }

    results = []
    for model_name, model_fcn in models.items():
        print("Starting fits for", model_name)
        
        # Define the model; names containing 'continuum' use continuum chiral logs
        if model_name == 'LogLess':
            model = model_fcn(channel, process, lam=lam)
        else:
            continuum_logs = 'continuum' in model_name
            model = model_fcn(channel, process, lam=lam, continuum_logs=continuum_logs)

        wrapped = WrappedModel(model)
        model_continuum = model_fcn(channel, process, lam=lam, continuum=True)
        continuum = ContinuumLimit(model.process)
        
        # Masks for dropping parts of the dataset
        masks = {
            'full': data['a_fm'] > 0,  # trivially true by definition. The full dataset.
            'omit 0.12 fm': data['a_fm'] != 0.12,  # drop the coarsest lattice spacing
            'omit 0.042 fm': data['a_fm'] != 0.042,  # drop the finest lattice spacing
            'mh/mc <= 1.1': ~data['alias_heavy'].isin(['1.4 m_charm', '1.5 m_charm', '2.0 m_charm', '2.2 m_charm']),
        }
        for mask_label, mask in masks.items():
            # Build data
            x, y_data = build_fit_data(data[mask])

            # Run variations on the model
            priors = ModelVariations(model.process, model_name).priors
            for label, prior in tqdm(priors.items()):
                if (label != 'NNLO') and (mask_label not in ('full', 'mh/mc <= 1.1')):
                    # Keep: full data and NNLO
                    # Keep: full data and model variation
                    # Keep: drop data and NNLO
                    # Skip: drop data and model variation simultaneously
                    continue

                fit = lsqfit.nonlinear_fit(data=(x, y_data), fcn=wrapped, prior=prior, debug=True)
                fit = serialize.SerializableNonlinearFit(fit)
                y_ctm = model_continuum(continuum.x, fit.p)
                result = fit.serialize()
                result['model_name'] = model_name
                result['model'] = model
                result['model_ctm'] = model_continuum
                result['continuum'] = continuum
                result['label'] = label
                result['dataset'] = mask_label
                result['fit'] = fit
                result['process'] = process
                result['channel'] = channel
                result['f(q2max)'] = y_ctm[0]
                result['f(q2=0)'] = y_ctm[-1]
                result['f(q2=middle)'] = y_ctm[len(y_ctm)//2]
                result['f'] = y_ctm
                results.append(result)

    return data, pd.DataFrame(results)
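A driver for these fits might then look like the following sketch; the channel label 'f_0' and the database engine are placeholders for whatever FormFactorData expects:

# Hypothetical usage; 'engine' must be a live database connection
data, summary = run_fits(process='Ds to K', channel='f_0', engine=engine)
print(summary[['model_name', 'label', 'dataset', 'f(q2=0)']])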