def load_parameters(file_res, file_cov, process, constraints):
    implementation_name = process + ' SSE'
    res_dict = csv_to_dict(file_res)
    cov_dict = csv_to_dict(file_cov)
    keys_sorted = sorted(res_dict.keys())
    res = [res_dict[k] for k in keys_sorted]
    # M -> M + M^T - diag(M) since the dictionary contains only the entries
    # on and above the diagonal
    cov = (np.array([[cov_dict.get((k, m), 0) for m in keys_sorted]
                     for k in keys_sorted])
           + np.array([[cov_dict.get((m, k), 0) for m in keys_sorted]
                       for k in keys_sorted])
           - np.diag([cov_dict[(k, k)] for k in keys_sorted]))
    parameter_names = [implementation_name + ' ' + coeff_name
                       for coeff_name in keys_sorted]
    for parameter_name in parameter_names:
        try:
            # check if parameter object already exists
            p = Parameter.get_instance(parameter_name)
        except KeyError:
            # otherwise, create a new one
            p = Parameter(parameter_name)
        else:
            # if parameter exists, remove existing constraints
            constraints.remove_constraints(parameter_name)
    constraints.add_constraint(
        parameter_names,
        MultivariateNormalDistribution(central_value=res, covariance=cov))
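# A minimal standalone sketch (made-up numbers, not values from the code
# above) of the symmetrization trick used there: when a file stores only the
# entries on and above the diagonal, M + M^T - diag(M) reconstructs the full
# symmetric covariance matrix.
import numpy as np

upper = np.array([[1.0, 0.3],
                  [0.0, 2.0]])  # lower triangle missing (stored as zero)
full = upper + upper.T - np.diag(np.diag(upper))
assert np.allclose(full, full.T)  # full == [[1.0, 0.3], [0.3, 2.0]]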
def test_emcee_scan(self):
    # dummy observables
    o1 = Observable('test_obs 1')
    o2 = Observable('test_obs 2')
    # dummy predictions
    def f1(wc_obj, par_dict):
        return par_dict['m_b']
    def f2(wc_obj, par_dict):
        return 2.5
    Prediction('test_obs 1', f1)
    Prediction('test_obs 2', f2)
    d1 = NormalDistribution(5, 0.2)
    cov2 = [[0.1**2, 0.5*0.1*0.3], [0.5*0.1*0.3, 0.3**2]]
    d2 = MultivariateNormalDistribution([6, 2], cov2)
    m1 = Measurement('measurement 1 of test_obs 1')
    m2 = Measurement('measurement 2 of test_obs 1 and test_obs 2')
    m1.add_constraint(['test_obs 1'], d1)
    m2.add_constraint(['test_obs 1', 'test_obs 2'], d2)
    fit = BayesianFit('fit_emcee_test', flavio.default_parameters,
                      ['m_b', 'm_c'], [], ['test_obs 1', 'test_obs 2'])
    scan = emceeScan(fit)
    scan.run(3, burnin=0)
    self.assertTupleEqual(scan.result.shape, (3 * scan.nwalkers, 2))
    BayesianFit.del_instance('fit_emcee_test')
    Observable.del_instance('test_obs 1')
    Observable.del_instance('test_obs 2')
    Measurement.del_instance('measurement 1 of test_obs 1')
    Measurement.del_instance('measurement 2 of test_obs 1 and test_obs 2')
def load_parameters(filename, process, constraints):
    implementation_name = process + ' BSZ'
    parameter_names = [implementation_name + ' ' + coeff_name
                       for coeff_name in a_ff_string]
    # a0_A0 and a0_T2 are not treated as independent parameters!
    parameter_names.remove(implementation_name + ' a0_A0')
    parameter_names.remove(implementation_name + ' a0_T2')
    for parameter_name in parameter_names:
        try:
            # check if parameter object already exists
            p = Parameter[parameter_name]
        except KeyError:
            # otherwise, create a new one
            p = Parameter(parameter_name)
            # get LaTeX representation of coefficient and form factor names
            _tex_a = tex_a[parameter_name.split(' ')[-1].split('_')[0]]
            _tex_ff = tex_ff[parameter_name.split(' ')[-1].split('_')[-1]]
            p.tex = r'$' + _tex_a + r'^{' + _tex_ff + r'}$'
            p.description = (r'BSZ form factor parametrization coefficient $'
                             + _tex_a + r'$ of $' + _tex_ff + r'$')
        else:
            # if parameter exists, remove existing constraints
            constraints.remove_constraint(parameter_name)
    [central, unc, corr] = get_ffpar(filename)
    constraints.add_constraint(
        parameter_names,
        MultivariateNormalDistribution(central_value=central,
                                       covariance=np.outer(unc, unc) * corr))
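# A short sketch (made-up numbers) of the covariance construction used above:
# scaling a correlation matrix elementwise by the outer product of the
# uncertainty vector gives cov_ij = corr_ij * unc_i * unc_j.
import numpy as np

unc = np.array([0.1, 0.3])
corr = np.array([[1.0, 0.5],
                 [0.5, 1.0]])
cov = np.outer(unc, unc) * corr  # [[0.01, 0.015], [0.015, 0.09]]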
def make_measurement(self, N=100, Nexp=5000, threads=1, force=False):
    """Initialize the fit by producing a pseudo-measurement containing both
    experimental uncertainties as well as theory uncertainties stemming
    from nuisance parameters.

    Optional parameters:

    - `N`: number of random computations for the SM covariance (computing
      time is proportional to it; more means fewer random fluctuations).
    - `Nexp`: number of random computations for the experimental covariance.
      This is much less expensive than the theory covariance, so a large
      number can be afforded (default: 5000).
    - `threads`: number of parallel threads for the SM covariance
      computation. Defaults to 1 (no parallelization).
    - `force`: if True, will recompute the SM covariance even if it has
      already been computed. Defaults to False.
    """
    central_exp, cov_exp = self._get_central_covariance_experiment(Nexp)
    cov_sm = self.get_sm_covariance(N, force=force, threads=threads)
    covariance = cov_exp + cov_sm
    # add the pseudo-measurement
    m = flavio.classes.Measurement(
        'Pseudo-measurement for FastFit instance: ' + self.name)
    if np.asarray(central_exp).ndim == 0 or len(central_exp) <= 1:
        # a single observable gets a univariate constraint
        m.add_constraint(self.observables,
                         NormalDistribution(central_exp, np.sqrt(covariance)))
    else:
        m.add_constraint(self.observables,
                         MultivariateNormalDistribution(central_exp,
                                                        covariance))
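# A minimal sketch of the dimensionality branch above, assuming flavio is
# installed (the helper name pseudo_constraint is hypothetical): a single
# observable collapses to a univariate constraint whose standard deviation
# is the square root of the 1x1 covariance.
import numpy as np
from flavio.statistics.probability import (NormalDistribution,
                                           MultivariateNormalDistribution)

def pseudo_constraint(central, covariance):
    central = np.atleast_1d(central)
    covariance = np.atleast_2d(covariance)
    if len(central) <= 1:
        return NormalDistribution(central[0], np.sqrt(covariance[0, 0]))
    return MultivariateNormalDistribution(central, covariance)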
def np_measurement(name, w, observables, covariance):
    """Measurement instance of `observables` measured with `covariance`,
    assuming the central values to be equal to the NP predictions given
    the `Wilson` instance `w`."""
    def predict(obs):
        d = Observable.argument_format(obs, 'dict')
        return flavio.np_prediction(d.pop('name'), w, **d)
    cv = [predict(obs) for obs in observables]
    d = MultivariateNormalDistribution(cv, covariance=covariance)
    m = Measurement(name)
    m.add_constraint(observables, d)
    return m
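# Hypothetical usage of np_measurement; the observable names, Wilson
# coefficient point, and covariance are illustrative assumptions, not values
# from the code above. Requires flavio and the wilson package.
import numpy as np
from wilson import Wilson

w = Wilson({'C9_bsmumu': -1.0}, scale=4.8, eft='WET', basis='flavio')
obs = [('<dBR/dq2>(B0->K*mumu)', 1.1, 6.0),
       ('<dBR/dq2>(B0->K*mumu)', 15.0, 19.0)]
m = np_measurement('NP benchmark measurement', w, obs,
                   covariance=np.diag([1e-9, 1e-9]))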
def test_exp_combo(self):
    o = Observable('test_obs')
    o.arguments = ['x']
    m = Measurement('test_obs measurement 1')
    m.add_constraint([('test_obs', 1)],
                     MultivariateNormalDistribution([1, 2], np.eye(2)))
    # error: no measurement
    with self.assertRaises(ValueError):
        flavio.combine_measurements('test_obs', x=1,
                                    include_measurements=['bla'])
    m.add_constraint([('test_obs', 1)], NormalDistribution(2, 3))
    combo = flavio.combine_measurements('test_obs', x=1)
    self.assertEqual(combo.central_value, 2)
    self.assertEqual(combo.standard_deviation, 3)
    m2 = Measurement('test_obs measurement 2')
    m2.add_constraint([('test_obs', 1)], NormalDistribution(3, 3))
    combo = flavio.combine_measurements('test_obs', x=1)
    self.assertAlmostEqual(combo.central_value, 2.5)
    self.assertAlmostEqual(combo.standard_deviation, sqrt(9 / 2))
    Observable.del_instance('test_obs')
def load_parameters(filename, constraints):
    f = pkgutil.get_data('flavio.physics', filename)
    ff_dict = yaml.safe_load(f)
    for parameter_name in ff_dict['parameters']:
        try:
            # check if parameter object already exists
            p = Parameter[parameter_name]
        except KeyError:
            # otherwise, create a new one
            p = Parameter(parameter_name)
        else:
            # if parameter exists, remove existing constraints
            constraints.remove_constraint(parameter_name)
    covariance = (np.outer(ff_dict['uncertainties'], ff_dict['uncertainties'])
                  * ff_dict['correlation'])
    if not np.allclose(covariance, covariance.T):
        # if the covariance is not symmetric, it is assumed that only the
        # values on and above the diagonal are present;
        # then: M -> M + M^T - diag(M)
        covariance = covariance + covariance.T - np.diag(np.diag(covariance))
    constraints.add_constraint(
        ff_dict['parameters'],
        MultivariateNormalDistribution(central_value=ff_dict['central_values'],
                                       covariance=covariance))
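# Illustrative YAML with the keys the loader above reads (parameter names and
# numbers are made up), showing how the covariance is assembled from the
# uncertainties and the correlation matrix:
import numpy as np
import yaml

ff_yaml = """
parameters: [example a0_f+, example a1_f+]
central_values: [0.33, -0.87]
uncertainties: [0.03, 0.25]
correlation:
- [1.0, 0.45]
- [0.45, 1.0]
"""
ff_dict = yaml.safe_load(ff_yaml)
covariance = (np.outer(ff_dict['uncertainties'], ff_dict['uncertainties'])
              * np.asarray(ff_dict['correlation']))
assert np.allclose(covariance, covariance.T)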
def load_parameters(file_res, file_cov, process, constraints):
    implementation_name = process + ' SSE'
    res_dict = csv_to_dict(file_res)
    cov_dict = csv_to_dict(file_cov)
    keys_sorted = sorted(res_dict.keys())
    res = [res_dict[k] for k in keys_sorted]
    # unlike the loader above, the covariance file is assumed to contain the
    # full matrix, so no symmetrization is applied
    cov = np.array([[cov_dict.get((k, m), 0) for m in keys_sorted]
                    for k in keys_sorted])
    parameter_names = [implementation_name + ' '
                       + translate_parameters(coeff_name)
                       for coeff_name in keys_sorted]
    for parameter_name in parameter_names:
        try:
            # check if parameter object already exists
            p = Parameter[parameter_name]
        except KeyError:
            # otherwise, create a new one
            p = Parameter(parameter_name)
            _tex_a = tex_a[parameter_name.split(' ')[-1].split('_')[0]]
            _tex_ff = tex_ff[parameter_name.split(' ')[-1].split('_')[-1]]
            p.tex = r'$' + _tex_a + r'^{' + _tex_ff + r'}$'
            p.description = (r'SSE form factor parametrization coefficient $'
                             + _tex_a + r'$ of $' + _tex_ff + r'$')
        else:
            # if parameter exists, remove existing constraints
            constraints.remove_constraint(parameter_name)
    constraints.add_constraint(
        parameter_names,
        MultivariateNormalDistribution(central_value=res, covariance=cov))