예제 #1
0
 def test_emcee_scan(self):
     """Run a short emcee scan on a two-observable toy fit and check
     that the flattened chain has the expected shape."""
     # dummy observables
     Observable('test_obs 1')
     Observable('test_obs 2')
     # dummy predictions: obs 1 tracks the m_b parameter, obs 2 is constant
     Prediction('test_obs 1', lambda wc_obj, par_dict: par_dict['m_b'])
     Prediction('test_obs 2', lambda wc_obj, par_dict: 2.5)
     # one 1D and one correlated 2D pseudo-measurement
     dist_1d = NormalDistribution(5, 0.2)
     corr_cov = [[0.1**2, 0.5*0.1*0.3], [0.5*0.1*0.3, 0.3**2]]
     dist_2d = MultivariateNormalDistribution([6, 2], corr_cov)
     meas_1 = Measurement('measurement 1 of test_obs 1')
     meas_2 = Measurement('measurement 2 of test_obs 1 and test_obs 2')
     meas_1.add_constraint(['test_obs 1'], dist_1d)
     meas_2.add_constraint(['test_obs 1', 'test_obs 2'], dist_2d)
     fit = BayesianFit('fit_emcee_test', flavio.default_parameters,
                       ['m_b', 'm_c'], [], ['test_obs 1', 'test_obs 2'])
     scan = emceeScan(fit)
     scan.run(3, burnin=0)
     # 3 steps per walker, 2 fit parameters
     self.assertTupleEqual(scan.result.shape, (3 * scan.nwalkers, 2))
     # clean up the global instance registries
     BayesianFit.del_instance('fit_emcee_test')
     Observable.del_instance('test_obs 1')
     Observable.del_instance('test_obs 2')
     Measurement.del_instance('measurement 1 of test_obs 1')
     Measurement.del_instance('measurement 2 of test_obs 1 and test_obs 2')
예제 #2
0
    def make_measurement(self, N=100, Nexp=5000, threads=1, force=False):
        """Initialize the fit by producing a pseudo-measurement containing both
        experimental uncertainties as well as theory uncertainties stemming
        from nuisance parameters.

        Optional parameters:

        - `N`: number of random computations for the SM covariance (computing
          time is proportional to it; more means less random fluctuations.)
        - `Nexp`: number of random computations for the experimental covariance.
          This is much less expensive than the theory covariance, so a large
          number can be afforded (default: 5000).
        - `threads`: number of parallel threads for the SM
          covariance computation. Defaults to 1 (no parallelization).
        - `force`: if True, will recompute SM covariance even if it
          already has been computed. Defaults to False.
        """
        # total covariance: experimental plus theory (SM) contributions
        central_exp, cov_exp = self._get_central_covariance_experiment(Nexp)
        cov_sm = self.get_sm_covariance(N, force=force, threads=threads)
        covariance = cov_sm + cov_exp
        # register the pseudo-measurement under a name tied to this instance
        m = flavio.classes.Measurement(
            'Pseudo-measurement for FastFit instance: ' + self.name)
        # a 0D or length-1 central value gets a univariate Gaussian;
        # anything longer gets a correlated multivariate Gaussian
        scalar = np.asarray(central_exp).ndim == 0 or len(central_exp) <= 1
        if scalar:
            constraint = NormalDistribution(central_exp, np.sqrt(covariance))
        else:
            constraint = MultivariateNormalDistribution(central_exp, covariance)
        m.add_constraint(self.observables, constraint)
예제 #3
0
 def test_exp_combo(self):
     """Test `flavio.combine_measurements` for a single observable:
     error on missing measurements, a single constraint, and the
     weighted combination of two constraints."""
     o = Observable('test_obs')
     o.arguments = ['x']
     m = Measurement('test_obs measurement 1')
     m.add_constraint([('test_obs', 1)], MultivariateNormalDistribution([1, 2], np.eye(2)))
     # error: no measurement
     with self.assertRaises(ValueError):
         flavio.combine_measurements('test_obs', x=1, include_measurements=['bla'])
     # overwrite the constraint with a 1D Gaussian
     m.add_constraint([('test_obs', 1)], NormalDistribution(2, 3))
     combo = flavio.combine_measurements('test_obs', x=1)
     self.assertEqual(combo.central_value, 2)
     self.assertEqual(combo.standard_deviation, 3)
     # a second, equally precise measurement: combination is the mean,
     # uncertainty shrinks by sqrt(2)
     m2 = Measurement('test_obs measurement 2')
     m2.add_constraint([('test_obs', 1)], NormalDistribution(3, 3))
     combo = flavio.combine_measurements('test_obs', x=1)
     self.assertAlmostEqual(combo.central_value, 2.5)
     self.assertAlmostEqual(combo.standard_deviation, sqrt(9 / 2))
     # clean up the global instance registries so other tests are not
     # affected (fix: the measurements were previously left behind)
     Observable.del_instance('test_obs')
     Measurement.del_instance('test_obs measurement 1')
     Measurement.del_instance('test_obs measurement 2')
예제 #4
0
 def _obstable_tree(self):
     """Build and cache a nested dict keyed by observable, holding for each
     observable: the theory prediction, experimental central value, theory
     and experimental uncertainties, the experimental PDF, inspire keys,
     and the pull between prediction and experiment (1 d.o.f.)."""
     if not self._obstable_tree_cache:
         info = tree()  # nested dict
         pull_dof = 1  # pulls are quoted for a single degree of freedom
         llh = self.likelihood
         for flh_name, flh in llh.fast_likelihoods.items():
             # loop over fast likelihoods: they only have a single "measurement"
             m = flh.pseudo_measurement
             ml = flh.full_measurement_likelihood
             pred = ml.get_predictions_par(llh.par_dict, self.w)
             # covariance diagonals give per-observable variances below
             sm_cov = flh.sm_covariance.get(force=False)
             _, exp_cov = flh.exp_covariance.get(force=False)
             inspire_dict = self._get_inspire_dict(flh.observables, ml)
             for i, obs in enumerate(flh.observables):
                 info[obs]['lh_name'] = flh_name
                 # obs may be a plain name or a (name, *args) tuple
                 info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
                 info[obs]['theory'] = pred[obs]
                 info[obs]['th. unc.'] = np.sqrt(sm_cov[i, i])
                 info[obs]['experiment'] = m.get_central(obs)
                 info[obs]['exp. unc.'] = np.sqrt(exp_cov[i, i])
                 info[obs]['exp. PDF'] = NormalDistribution(
                     m.get_central(obs), np.sqrt(exp_cov[i, i]))
                 info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
                 # log-likelihood at the experimental central value and at
                 # the theory prediction
                 ll_central = m.get_logprobability_single(
                     obs, m.get_central(obs))
                 ll = m.get_logprobability_single(obs, pred[obs])
                 # DeltaChi2 is -2*DeltaLogLikelihood
                 info[obs]['pull'] = pull(-2 * (ll - ll_central),
                                          dof=pull_dof)
         for lh_name, lh in llh.likelihoods.items():
             # loop over "normal" likelihoods
             ml = lh.measurement_likelihood
             pred = ml.get_predictions_par(llh.par_dict, self.w)
             inspire_dict = self._get_inspire_dict(lh.observables, ml)
             for i, obs in enumerate(lh.observables):
                 # split observable into name and keyword arguments for the
                 # measurement combination
                 obs_dict = flavio.Observable.argument_format(obs, 'dict')
                 obs_name = obs_dict.pop('name')
                 with warnings.catch_warnings():
                     warnings.simplefilter("ignore")
                     p_comb = flavio.combine_measurements(
                         obs_name,
                         include_measurements=ml.get_measurements,
                         **obs_dict)
                 info[obs]['experiment'] = p_comb.central_value
                 # use the larger of the asymmetric errors
                 info[obs]['exp. unc.'] = max(p_comb.error_left,
                                              p_comb.error_right)
                 info[obs]['exp. PDF'] = p_comb
                 info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
                 info[obs]['theory'] = pred[obs]
                 # NOTE(review): th. unc. is hard-coded to 0 in this branch,
                 # unlike the fast-likelihood branch above — presumably no SM
                 # covariance is available here; confirm
                 info[obs]['th. unc.'] = 0
                 info[obs]['lh_name'] = lh_name
                 info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
                 ll = p_comb.logpdf([pred[obs]]) - p_comb.logpdf(
                     [p_comb.central_value])
                 # DeltaChi2 is -2*DeltaLogLikelihood
                 info[obs]['pull'] = pull(-2 * ll, dof=pull_dof)
         self._obstable_tree_cache = info
     return self._obstable_tree_cache
예제 #5
0
 def obstable_sm(self):
     """Build and cache a nested dict keyed by observable, holding for each
     observable: the SM log-likelihood (`ll_sm`), the log-likelihood at the
     experimental central value (`ll_central`), experimental information,
     uncertainties and inspire keys."""
     self._check_sm_cov_loaded()
     if self._obstable_sm is None:
         info = tree()  # nested dict
         for flh_name, flh in self.fast_likelihoods.items():
             # loop over fast likelihoods: they only have a single "measurement"
             m = flh.pseudo_measurement
             ml = flh.full_measurement_likelihood
             # SM predictions: SM parameters with trivial Wilson coefficients
             pred_sm = ml.get_predictions_par(self.par_dict_sm,
                                              flavio.WilsonCoefficients())
             sm_cov = flh.sm_covariance.get(force=False)
             _, exp_cov = flh.exp_covariance.get(force=False)
             inspire_dict = self._get_inspire_dict(flh.observables, ml)
             for i, obs in enumerate(flh.observables):
                 info[obs]['lh_name'] = flh_name
                 # obs may be a plain name or a (name, *args) tuple
                 info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
                 # covariance diagonals give per-observable variances
                 info[obs]['th. unc.'] = np.sqrt(sm_cov[i, i])
                 info[obs]['experiment'] = m.get_central(obs)
                 info[obs]['exp. unc.'] = np.sqrt(exp_cov[i, i])
                 info[obs]['exp. PDF'] = NormalDistribution(
                     m.get_central(obs), np.sqrt(exp_cov[i, i]))
                 info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
                 # log-likelihood at the SM prediction and at the
                 # experimental central value
                 info[obs]['ll_sm'] = m.get_logprobability_single(
                     obs, pred_sm[obs])
                 info[obs]['ll_central'] = m.get_logprobability_single(
                     obs, m.get_central(obs))
         for lh_name, lh in self.likelihoods.items():
             # loop over "normal" likelihoods
             ml = lh.measurement_likelihood
             pred_sm = ml.get_predictions_par(self.par_dict_sm,
                                              flavio.WilsonCoefficients())
             inspire_dict = self._get_inspire_dict(lh.observables, ml)
             for i, obs in enumerate(lh.observables):
                 # split observable into name and keyword arguments for the
                 # measurement combination
                 obs_dict = flavio.Observable.argument_format(obs, 'dict')
                 obs_name = obs_dict.pop('name')
                 with warnings.catch_warnings():
                     warnings.simplefilter("ignore")
                     p_comb = flavio.combine_measurements(
                         obs_name,
                         include_measurements=ml.get_measurements,
                         **obs_dict)
                 info[obs]['experiment'] = p_comb.central_value
                 # use the larger of the asymmetric errors
                 info[obs]['exp. unc.'] = max(p_comb.error_left,
                                              p_comb.error_right)
                 info[obs]['exp. PDF'] = p_comb
                 info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
                 # NOTE(review): th. unc. hard-coded to 0 in this branch,
                 # unlike the fast-likelihood branch above — confirm intent
                 info[obs]['th. unc.'] = 0
                 info[obs]['lh_name'] = lh_name
                 info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
                 info[obs]['ll_sm'] = p_comb.logpdf([pred_sm[obs]])
                 # replace -inf by a huge finite value so downstream
                 # arithmetic stays finite
                 if info[obs]['ll_sm'] == -np.inf:
                     info[obs]['ll_sm'] = -1e100
                 info[obs]['ll_central'] = p_comb.logpdf(
                     [p_comb.central_value])
         self._obstable_sm = info
     return self._obstable_sm
예제 #6
0

def myDeltaMS(wc_obj, par):
    """DMs prediction: return the SM value from the 'Delta M_S' parameter,
    rescaled by |1 + Cbs/1.3397e-3| when Wilson coefficients are set
    (Cbs derived from the 'CVLL_bsbs' coefficient)."""
    dms_sm = par['Delta M_S']
    # no NP Wilson coefficients set: pure SM value
    if wc_obj.wc is None:
        return dms_sm
    xi_tbs = ckm.xi('t', 'bs')(par)
    c_bs = -sq2 / (4 * GF * xi_tbs**2) * wc_obj.wc['CVLL_bsbs']
    return dms_sm * abs(1 + c_bs / 1.3397e-3)


# Register the DMs observable and attach the prediction function above
Observable('DMs')
Prediction('DMs', myDeltaMS)
# Experimental constraint on DMs
m = Measurement('DMs exp')
m.add_constraint(['DMs'], NormalDistribution(17.757, 0.021))
# Experimental constraint on S_psiphi
m2 = Measurement('SLAC HFLAV 2018')
m2.add_constraint(['S_psiphi'], NormalDistribution(0.021,
                                                   0.030144582822607187))


def wc_Z(lambdaQ, MZp):
    "Wilson coefficients as functions of Z' couplings"
    if MZp < 100:
        return {}
    alpha = get_alpha(par, MZp)['alpha_e']
    return {
        'C9_bsmumu':
        -pi / (sq2 * GF * MZp**2 * alpha) * lambdaQ / ckm.xi('t', 'bs')(par),
        'C10_bsmumu':
        pi / (sq2 * GF * MZp**2 * alpha) * lambdaQ / ckm.xi('t', 'bs')(par),