def test_bbn_likelihood(packages_path, skip_not_installed):
    """Check the BBN likelihood against reference log-likelihood values."""
    packages_path = process_packages_path(packages_path)
    install_test_wrapper(skip_not_installed, get_camb, packages_path)
    from camb.bbn import BBN_table_interpolator
    BBN_likelihood.bbn = BBN_table_interpolator(bbn_table)
    model = get_model(info_error, packages_path=packages_path)
    reference = [0.246, -0.84340]
    assert np.allclose(model.loglikes({'YHe': 0.246})[0], reference,
                       rtol=1e-4), \
        "Failed BBN likelihood with %s" % info_error
    # Second case: the BBN likelihood has to be calculated before CAMB.
    BBN_with_theory_errors.bbn = BBN_likelihood.bbn
    model = get_model(info_error2, packages_path=packages_path)
    assert np.allclose(model.loglikes({'BBN_delta': 1.0})[0],
                       [0.24594834, -0.5], rtol=1e-4)
def test_cobaya(self):
    """Evaluate the MFLike chi2 through a Cobaya model vs. the reference."""
    mflike_type = self.get_mflike_type(as_string=True)
    info = {
        "likelihood": {
            mflike_type: {
                "data_folder": "MFLike/v0.6",
                "input_file": pre + "00000.fits",
                "cov_Bbl_file": pre + "w_covar_and_Bbl.fits",
            }
        },
        "theory": {
            "camb": {
                "extra_args": {"lens_potential_accuracy": 1},
                "stop_at_error": True,
            }
        },
        "params": cosmo_params,
        "modules": packages_path,
        "debug": True,
    }
    from cobaya.model import get_model
    model = get_model(info)
    my_mflike = model.likelihood[mflike_type]
    # Subtract the constant normalization before converting to chi2.
    chi2 = -2 * (model.loglikes(nuisance_params)[0] - my_mflike.logp_const)
    self.assertAlmostEqual(chi2[0], chi2s["tt-te-et-ee"], 2)
def test_bbn_yhe(packages_path):
    """Derived YHe must agree with the BBN interpolation table for every
    combination of theory ordering and explicit/implicit derived params."""
    packages_path = process_packages_path(packages_path)
    load_module("camb", path=os.path.join(packages_path, "code", "CAMB"))
    from camb.bbn import BBN_table_interpolator
    BBN.bbn = BBN_table_interpolator(bbn_table)
    BBN2.bbn = BBN.bbn
    info['params']['check'] = {'derived': True}
    for inf in (info, info2):
        inf['packages_path'] = packages_path
        for order in [1, -1]:
            # NOTE(review): loop-nesting reconstructed from mangled source —
            # YHe is (re)declared for the next iteration inside the inner
            # loop, then removed once per ordering pass; confirm upstream.
            for explicit_derived in [None, None, {'derived': True}]:
                print(inf, order, explicit_derived)
                model = get_model(inf)
                loglike, derived = model.loglikes({})
                # All derived values plus the table value must coincide.
                vals = set([BBN.bbn.Y_He(camb_params['ombh2'])] + derived)
                assert len(vals) == 1, \
                    "wrong Yhe value: %s" % vals
                inf['params']["YHe"] = explicit_derived
            inf['params'].pop('YHe')
            # Reverse the theory component order for the next pass.
            inf['theory'] = {p: v for p, v in
                            reversed(list(inf['theory'].items()))}
def test_cross_correlation():
    """Cross-correlation likelihood: logL at the fiducial point."""
    cosmo_params = {"Omega_c": 0.25, "Omega_b": 0.05, "h": 0.67, "n_s": 0.96}
    h = cosmo_params['h']
    info = {
        "params": {
            "omch2": cosmo_params['Omega_c'] * h ** 2.,
            "ombh2": cosmo_params['Omega_b'] * h ** 2.,
            "H0": h * 100,
            "ns": cosmo_params['n_s'],
            "As": 2.2e-9,
            "tau": 0,
            "b1": 1,
            "s1": 0.4,
        },
        "likelihood": {"CrossCorrelationLikelihood": CrossCorrelationLikelihood},
        "theory": {
            "camb": None,
            "ccl": {"external": CCL, "nonlinear": False},
        },
        "debug": False,
        "stop_at_error": True,
    }
    model = get_model(info)
    # All parameters are fixed, so loglikes() needs no input point.
    loglikes, derived = model.loglikes()
    assert np.isclose(loglikes[0], 88.2, atol=.2, rtol=0.)
def test_cobaya(self):
    """Test the Cobaya interface to the SPTPol likelihood."""
    from cobaya.model import get_model
    base_info = {
        "debug": True,
        "theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1}}},
        "params": {**cosmo_params, **fg_params},
        "modules": packages_path,
    }
    reference_chi2s = {
        "teee": 162.98103875445057,
        "te": 74.72963434194682,
        "ee": 76.80106189735758,
    }
    for use_cl, expected_chi2 in reference_chi2s.items():
        print("use_cl", use_cl)
        base_info["likelihood"] = {f"sptpol_2017.{use_cl.upper()}": None}
        model = get_model(base_info)
        chi2 = -2 * model.loglike({})[0]
        self.assertAlmostEqual(chi2, expected_chi2, 3)
def test_cobaya(self):
    """Test the Cobaya interface to the SPT3G likelihood."""
    from cobaya.model import get_model
    info = {
        "debug": True,
        "likelihood": {"spt3g_2020.TEEE": None},
        "theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1}}},
        "params": {**cosmo_params, **fg_params},
        "packages_path": packages_path,
    }
    model = get_model(info)
    # chi2 = -2 logL, compared against the cached reference value.
    chi2 = -2 * model.loglike({})[0]
    self.assertAlmostEqual(chi2, 1143.0310254786946, 2)
def initiate_model(info_text, packages_path='/home/moon/mniemeyer/cobaya_modules'):
    """Build a cobaya model from a YAML info string and draw a prior sample.

    Fixes a NameError: the body previously referenced ``info_txt`` while the
    parameter is named ``info_text``.

    Args:
        info_text: YAML document describing the cobaya run (params, theory,
            likelihood, ...).
        packages_path: location of the installed cobaya packages; defaults to
            the previously hard-coded path for backward compatibility.

    Returns:
        Tuple ``(model, point)`` where ``point`` maps each sampled parameter
        name to a value drawn from the prior (external priors ignored).
    """
    info = yaml_load(info_text)
    info['packages_path'] = packages_path
    model = get_model(info)
    point = dict(zip(model.parameterization.sampled_params(),
                     model.prior.sample(ignore_external=True)[0]))
    return model, point
def test_bbn_likelihood(packages_path):
    """BBN likelihood reference values, with CAMB loaded from packages_path."""
    packages_path = process_packages_path(packages_path)
    load_module("camb", path=os.path.join(packages_path, "code", "CAMB"))
    from camb.bbn import BBN_table_interpolator
    BBN_likelihood.bbn = BBN_table_interpolator(bbn_table)
    info_error['packages_path'] = packages_path
    model = get_model(info_error)
    assert np.allclose(model.loglikes({'YHe': 0.246})[0],
                       [0.246, -0.84340], rtol=1e-4), \
        "Failed BBN likelihood with %s" % info_error
    # Second case: the BBN likelihood has to be calculated before CAMB.
    BBN_with_theory_errors.bbn = BBN_likelihood.bbn
    info_error2['packages_path'] = packages_path
    model = get_model(info_error2)
    assert np.allclose(model.loglikes({'BBN_delta': 1.0})[0],
                       [0.24594834, -0.5], rtol=1e-4)
def test_cobaya(self):
    """Planck lensing likelihoods via Cobaya, including the marged variants."""
    from cobaya.model import get_model
    info = {
        'likelihood': {'plancklensing.PlanckLensing': None},
        'theory': {'camb': {"extra_args": {"lens_potential_accuracy": 1}}},
        'params': camb_params,
        'debug': True,
    }
    model = get_model(info)
    chi2 = -2 * model.loglikes({'A_planck': 1.0})[0]
    self.assertAlmostEqual(chi2[0], 8.734, 1)
    # Both addressing styles of the marged likelihood must give the same chi2.
    marged_names = ['plancklensing.PlanckLensingMarged',
                    'plancklensing.plancklensing.PlanckLensingMarged']
    for name in marged_names:
        info = {
            'likelihood': {name: None},
            'theory': {
                'camb': {"extra_args": {"lens_potential_accuracy": 1},
                         'stop_at_error': True}
            },
            'params': camb_params,
            'stop_at_error': True,
        }
        model = get_model(info)
        chi2 = -2 * model.loglikes({})[0]
        self.assertAlmostEqual(chi2[0], 8.765, 1)
def test_cobaya(self):
    """Each lollipop mode's chi2 must lie within 1 of its reference value."""
    from cobaya.model import get_model
    for mode, chi2 in chi2s.items():
        info = {
            "debug": True,
            "likelihood": {"planck_2020_lollipop.{}".format(mode): None},
            "theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1}}},
            "params": cosmo_params,
            "modules": packages_path,
        }
        model = get_model(info)
        computed = -2 * model.loglikes({})[0][0]
        self.assertLess(abs(computed - chi2), 1)
def _get_fiducial_Cls(self):
    """Compute fiducial CMB Cls (no ell factor) from a one-likelihood model."""
    info_fiducial = {
        "params": self.fiducial_params,
        "likelihood": {"soliket.utils.OneWithCls": {"lmax": self.theory_lmax}},
        "theory": {"camb": {"extra_args": {"kmax": 0.9}}},
    }
    model_fiducial = get_model(info_fiducial)
    # Evaluate once at the (fixed) fiducial point so the provider is filled.
    model_fiducial.logposterior({})
    return model_fiducial.provider.get_Cl(ell_factor=False)
def test_cobaya(self):
    """SPT high-ell TT likelihood chi2 against the reference value."""
    info = dict(
        debug=True,
        likelihood={"spt_hiell_2020.TT": None},
        theory=dict(camb={"extra_args": {"lens_potential_accuracy": 1}}),
        params=cosmo_params,
        modules=packages_path,
    )
    from cobaya.model import get_model
    model = get_model(info)
    my_spt = model.likelihood["spt_hiell_2020.TT"]
    chi2 = -2 * (model.loglikes(nuisance_params)[0])
    self.assertAlmostEqual(chi2[0], 1289.6505690210145, 3)
def test_clusters():
    """Cluster likelihood: finite logL and a sensible expected cluster count."""
    fiducial_params = {
        "ombh2": 0.02225,
        "omch2": 0.1198,
        "H0": 67.3,
        "tau": 0.06,
        "As": 2.2e-9,
        "ns": 0.96,
        "mnu": 0.06,
        "nnu": 3.046,
    }
    camb_extra_args = {
        "accurate_massive_neutrino_transfers": True,
        "num_massive_neutrinos": 1,
        "redshifts": np.linspace(0, 2, 41),
        "nonlinear": False,
        "kmax": 10.0,
        "dark_energy_model": "ppf",
    }
    info_fiducial = {
        "params": fiducial_params,
        "likelihood": {"soliket.ClusterLikelihood": {"stop_at_error": True}},
        "theory": {
            "camb": {"extra_args": camb_extra_args},
            "soliket.CCL": {"stop_at_error": True},
        },
    }
    from cobaya.model import get_model
    model_fiducial = get_model(info_fiducial)
    lnl = model_fiducial.loglikes({})[0]
    assert np.isfinite(lnl)
    like = model_fiducial.likelihood["soliket.ClusterLikelihood"]
    assert like._get_n_expected() > 40
def get_demo_lensing_model(theory):
    """Build a LensingLite demo model for either the camb or classy theory.

    NOTE(review): the YAML layout below was reconstructed from a
    whitespace-mangled source; confirm it matches the original file.
    """
    if theory == "camb":
        info_yaml = r"""
likelihood:
    soliket.LensingLiteLikelihood:
        stop_at_error: True
theory:
    camb:
        extra_args:
            lens_potential_accuracy: 1
params:
    ns:
        prior:
            min: 0.8
            max: 1.2
    H0:
        prior:
            min: 40
            max: 100
"""
    elif theory == "classy":
        info_yaml = r"""
likelihood:
    soliket.LensingLiteLikelihood:
        stop_at_error: True
theory:
    classy:
        extra_args:
            output: lCl, tCl
        path: global
params:
    n_s:
        prior:
            min: 0.8
            max: 1.2
    H0:
        prior:
            min: 40
            max: 100
"""
    info = yaml_load(info_yaml)
    model = get_model(info)
    return model
def _get_model(packages_path, likelihood_info):
    """Helper: assemble a model around the given test-likelihood info."""
    info = {
        'params': params,
        'likelihood': {'test_likelihood': likelihood_info},
        'theory': {
            'camb': {
                'stop_at_error': True,
                'extra_args': {'num_massive_neutrinos': 1},
            }
        },
        'packages_path': process_packages_path(packages_path),
    }
    return get_model(info)
def test_cobaya(self):
    """MFLike via Cobaya: chi2 for the tt-te-et-ee combination."""
    info = {
        "likelihood": {
            "mflike.MFLike": {
                "input_file": pre + "00000.fits",
                "cov_Bbl_file": pre + "w_covar_and_Bbl.fits",
            }
        },
        "theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1}}},
        "params": cosmo_params,
        "packages_path": packages_path,
    }
    from cobaya.model import get_model
    model = get_model(info)
    my_mflike = model.likelihood["mflike.MFLike"]
    # Subtract the constant normalization before converting to chi2.
    chi2 = -2 * (model.loglikes(nuisance_params)[0] - my_mflike.logp_const)
    self.assertAlmostEqual(chi2[0], chi2s["tt-te-et-ee"], 2)
def test_pk_binning(packages_path):
    """Reproduce a power-law P(k) from binned spline-point values.

    The k sampling has to be fine to reach the 1e-3 precision of the test.
    """
    nbins = 40
    tau = 0.05
    k_min_bin = -5.5
    k_max_bin = 2
    info = {
        'packages_path': process_packages_path(packages_path),
        'likelihood': {'cmb': Pklike},
        'theory': {
            'camb': {"external_primordial_pk": True},
            'my_pk': {
                "external": BinnedPk,
                'nbins': nbins,
                'k_min_bin': k_min_bin,
                'k_max_bin': k_max_bin,
            },
        },
        'params': {
            "ombh2": 0.022274,
            "omch2": 0.11913,
            "cosmomc_theta": 0.01040867,
            "tau": tau,
            "nnu": 3.046,
        },
        'stop_at_error': True,
        'debug': debug,
    }
    scale = 1e-9
    ks = np.logspace(k_min_bin, k_max_bin, nbins)

    def pk_test(k):
        # Power law in the binning units, including the e^{-2 tau} factor.
        return testAs * (k / 0.05) ** (testns - 1) / scale * np.exp(-2 * tau)

    pars = {'b%s' % (b + 1): pk_test(ks[b]) for b in range(nbins)}
    model = get_model(info)
    model.loglikes(pars)
def test_dependencies(packages_path):
    """Exercise theory dependency resolution, including its error cases."""
    info['packages_path'] = process_packages_path(packages_path)
    theories = [('A', A), ('B', B)]
    _test_loglike(theories)
    _test_loglike([('A', A), ('B', B2)])
    info['params']['Bderived'] = {'derived': True}
    info['theory'] = dict(theories)
    model = get_model(info)
    assert model.loglikes({})[1] == [10], "failed"
    info['params'].pop('Bderived')
    # A2 and B need each other's products -> circular dependency error.
    with pytest.raises(LoggedError) as e:
        _test_loglike([('A', A2), ('B', B)])
    assert "Circular dependency" in str(e.value)
    _test_loglike([('A', {'external': A}), ('B', B2)])
    # Two components claim the same product without an explicit 'provides'.
    with pytest.raises(LoggedError) as e:
        _test_loglike([('A', A), ('B', B2), ('C', C)])
    assert "Bout is provided by more than one component" in str(e.value)
    # Explicit 'provides' resolves the ambiguity either way round.
    _test_loglike([('A', A), ('B', B2),
                   ('C', {'external': C, 'provides': 'Bout'})])
    _test_loglike([('A', A),
                   ('B', {'external': B2, 'provides': ['Bout']}),
                   ('C', {'external': C})])
    # ... but declaring it on both components is again an error.
    with pytest.raises(LoggedError) as e:
        _test_loglike([('A', A),
                       ('B', {'external': B2, 'provides': ['Bout']}),
                       ('C', {'external': C, 'provides': ['Bout']})])
    assert "more than one component provides Bout" in str(e.value)
def test_cobaya(self):
    """MFLike (sim 0) chi2 against the tt-te-ee reference."""
    info = {
        "likelihood": {"mflike.MFLike": {"sim_id": 0}},
        "theory": {"camb": {"extra_args": {"lens_potential_accuracy": 1}}},
        "params": cosmo_params,
        "modules": modules_path,
    }
    from cobaya.model import get_model
    model = get_model(info)
    chi2 = -2 * model.loglikes(nuisance_params)[0]
    self.assertAlmostEqual(chi2[0], chi2s["tt-te-ee"], 3)
def test_cobaya(self):
    """Every addressing style of the test likelihood must give the same
    log-likelihood and the same bibtex.

    Fix: ``last_bib`` was never assigned after the first iteration, so the
    ``bib == last_bib`` comparison branch was dead code; it is now updated
    each pass so later bibtex strings really get compared to the first one.
    """
    from cobaya.yaml import yaml_load
    from cobaya.model import get_model
    last_bib = None
    names = ['test_package.TestLike',
             'test_package.test_like.TestLike',
             'test_package.sub_module.test_like2.TestLike2',
             'test_package.sub_module.test_like2']
    for name in names:
        # NOTE(review): YAML layout reconstructed from a whitespace-mangled
        # source; structure is likelihood -> <name> -> params -> H0: 72.
        info_yaml = r"""
likelihood:
    %s:
        params:
            H0: 72
""" % name
        info = yaml_load(info_yaml)
        model = get_model(info)
        self.assertAlmostEqual(-2 * model.loglikes({})[0][0], 3.614504, 4)
        bib = model.likelihood[name].get_bibtex()
        # First pass: sanity-check the content; later passes: must match it.
        self.assertTrue('Lewis' in bib if last_bib is None else bib == last_bib)
        last_bib = bib
def get_theory_cls(setup, lmax):
    """Compute the theory TT Dls (ell factor applied) up to lmax."""
    # Get simulation parameters.
    simu = setup["simulation"]
    # NOTE(review): this key really contains a space ("cosmo. parameters");
    # looks odd but must match the setup dict produced upstream — confirm.
    cosmo = simu["cosmo. parameters"]
    # CAMB uses As rather than logA.
    if "logA" in cosmo:
        cosmo["As"] = 1e-10 * np.exp(cosmo["logA"])
        del cosmo["logA"]
    # Get cobaya setup.
    from copy import deepcopy
    info = deepcopy(setup["cobaya"])
    info["params"] = cosmo
    # Fake likelihood so far.
    info["likelihood"] = {"one": None}
    from cobaya.model import get_model
    model = get_model(info)
    model.likelihood.theory.needs(cl={"tt": lmax})
    model.logposterior({})  # parameters are fixed
    Dls = model.likelihood.theory.get_cl(ell_factor=True)
    return Dls["tt"]
def test_cobaya():
    """Test the Cobaya interface to the ACT likelihood."""
    from cobaya.yaml import yaml_load
    from cobaya.model import get_model
    # NOTE(review): YAML layout reconstructed from a whitespace-mangled source.
    info_yaml = r"""
likelihood:
    pyactlike.ACTPol_lite_DR4:
        components:
            - tt
            - te
            - ee
        lmax: 6000
theory:
    camb:
        extra_args:
            lens_potential_accuracy: 1
params:
    ns:
        prior:
            min: 0.8
            max: 1.2
    H0:
        prior:
            min: 40
            max: 100
    yp2:
        prior:
            min: 0.5
            max: 1.5
"""
    info = yaml_load(info_yaml)
    model = get_model(info)
    assert np.isfinite(model.loglike({"ns": 1.0, "H0": 70, "yp2": 1.0})[0])
def test_primordial_pk(packages_path):
    """Evaluate the primordial-pk model at the test amplitude and tilt."""
    packages_path = process_packages_path(packages_path)
    info_pk['packages_path'] = packages_path
    model = get_model(info_pk)
    model.loglikes({'testAs': testAs, 'testns': testns})
from cobaya.model import get_model

# Build the model from the info dict assembled above.
model = get_model(info)
def test_parameterization_dependencies():
    """Chained input/derived parameter dependencies, and priors on them."""

    class TestLike(Likelihood):
        params = {'a': None, 'b': None}

        def get_can_provide_params(self):
            return ['D']

        def logp(self, **params_values):
            a = params_values['a']
            b = params_values['b']
            params_values['_derived']['D'] = -7
            return a + 100 * b

    # NOTE(review): YAML layout reconstructed from a whitespace-mangled source.
    info_yaml = r"""
params:
    aa:
        prior: [2,4]
    bb:
        prior: [0,1]
        ref: [0.5, 0.1]
    c:
        value: "lambda aa, bb: aa+bb"
    a:
        value: "lambda c, aa: c*aa"
    b: 1
    D:
    E:
        derived: "lambda D,c,a,aa: D*c/a+aa"
prior:
    pr: "lambda bb, a: bb-10*a"
stop_at_error: True
"""
    test_info = yaml_load(info_yaml)
    test_info["likelihood"] = {"Like": TestLike}
    model = get_model(test_info)
    # logL = a + 100*b with a = c*aa = (aa+bb)*aa = 2.5*2 = 5 and b = 1.
    assert np.isclose(model.loglike({'bb': 0.5, 'aa': 2})[0], 105)
    assert np.isclose(
        model.logposterior({'bb': 0.5, 'aa': 2}).logpriors[1], -49.5)
    # Make b dynamical as well.
    test_info['params']['b'] = {'value': 'lambda a, c, bb: a*c*bb'}
    like, derived = get_model(test_info).loglike({'bb': 0.5, 'aa': 2})
    assert np.isclose(like, 630)
    assert derived == [2.5, 5.0, 6.25, -7, -1.5]
    assert np.isclose(
        model.logposterior({'bb': 0.5, 'aa': 2}).logpriors[1], -49.5)
    # With every input fixed, loglike() needs no argument.
    test_info['params']['aa'] = 2
    test_info['params']['bb'] = 0.5
    like, derived = get_model(test_info).loglike()
    assert np.isclose(like, 630)
    assert derived == [2.5, 5.0, 6.25, -7, -1.5]
    # Priors may not reference unknown parameters...
    test_info["prior"]["on_derived"] = "lambda f: 5*f"
    with pytest.raises(LoggedError) as e:
        get_model(test_info)
    assert "found and don't have a default value either" in str(e.value)
    # ... nor output derived parameters (currently not allowed).
    test_info["prior"]["on_derived"] = "lambda E: 5*E"
    with pytest.raises(LoggedError) as e:
        get_model(test_info)
    assert "that are output derived parameters" in str(e.value)
def _test_loglike(theories):
    """Assert loglike == 8 for both orderings of the given theory list."""
    for th in theories, theories[::-1]:
        info['theory'] = dict(th)
        model = get_model(info)
        assert model.loglikes({})[0] == 8, "test loglike failed for %s" % th
def get_demo_lensing_model(theory):
    """Install and build a LensingLikelihood demo model plus a test point.

    NOTE(review): the YAML layout below was reconstructed from a
    whitespace-mangled source; confirm it matches the original file.
    """
    if theory == "camb":
        info_yaml = r"""
likelihood:
    soliket.LensingLikelihood:
        stop_at_error: True
theory:
    camb:
        extra_args:
            lens_potential_accuracy: 1
params:
    ns:
        prior:
            min: 0.8
            max: 1.2
    H0:
        prior:
            min: 40
            max: 100
"""
    elif theory == "classy":
        info_yaml = r"""
likelihood:
    soliket.LensingLikelihood:
        stop_at_error: True
theory:
    classy:
        extra_args:
            output: lCl, tCl
        path: global
params:
    n_s:
        prior:
            min: 0.8
            max: 1.2
    H0:
        prior:
            min: 40
            max: 100
"""
    info = yaml_load(info_yaml)
    from cobaya.install import install
    install(info, path=packages_path, skip_global=True)
    # For each sampled parameter, pick its ref value if given (scalar or
    # min/max range), otherwise the midpoint of its prior.
    test_point = {}
    for par, pdict in info["params"].items():
        if not isinstance(pdict, dict):
            continue
        if "ref" in pdict:
            try:
                value = float(pdict["ref"])
            except TypeError:
                value = (pdict["ref"]["min"] + pdict["ref"]["max"]) / 2
            test_point[par] = value
        elif "prior" in pdict:
            value = (pdict["prior"]["min"] + pdict["prior"]["max"]) / 2
            test_point[par] = value
    model = get_model(info)
    return model, test_point
def _test_loglike2(theories):
    """Conditional-dependency variant: loglike must be 20 for both orders."""
    for th in theories, theories[::-1]:
        info2['theory'] = dict(th)
        model = get_model(info2)
        assert model.loglike()[0] == 20., \
            "fail conditional dependency for %s" % th
'n_s': 9.660360599e-01, 'tau_reio': 5.142494234e-02 } fiducial_params_extra = { 'recombination': 'recfast', 'non linear': 'halofit' } fiducial_params_full = fiducial_params.copy() fiducial_params_full.update(fiducial_params_extra) info_fiducial = { 'params': fiducial_params, 'likelihood': {'cobaya_mock_cmb.MockSO': {'python_path': '.'}, 'cobaya_mock_cmb.MockSOBaseline': {'python_path': '.'}, 'cobaya_mock_cmb.MockSOGoal': {'python_path': '.'}, 'cobaya_mock_cmb.MockCMBS4': {'python_path': '.'}, 'cobaya_mock_cmb.MockCMBS4sens0': {'python_path': '.'}, 'cobaya_mock_cmb.MockPlanck': {'python_path': '.'}}, 'theory': {'classy': {"extra_args": fiducial_params_extra}}} model_fiducial = get_model(info_fiducial) model_fiducial.logposterior({}) Cls = model_fiducial.provider.get_Cl(units="muK2") for likelihood in model_fiducial.likelihood.values(): likelihood.create_fid_values(Cls, fiducial_params_full, override=True)
def get_demo_xcorr_model(theory):
    """Build an XcorrLikelihood demo model for either camb or classy.

    NOTE(review): the YAML layout below was reconstructed from a
    whitespace-mangled source; confirm it matches the original file.
    """
    if theory == "camb":
        info_yaml = r"""
likelihood:
    soliket.XcorrLikelihood:
        stop_at_error: True
        datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
        k_tracer_name: ck_so
        gc_tracer_name: gc_unwise
theory:
    camb:
        extra_args:
            lens_potential_accuracy: 1
params:
    tau: 0.05
    mnu: 0.0
    nnu: 3.046
    b1:
        prior:
            min: 0.
            max: 10.
        ref:
            min: 1.
            max: 4.
        proposal: 0.1
    s1:
        prior:
            min: 0.1
            max: 1.0
        proposal: 0.1
"""
    elif theory == "classy":
        info_yaml = r"""
likelihood:
    soliket.XcorrLikelihood:
        stop_at_error: True
        datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
        k_tracer_name: ck_so
        gc_tracer_name: gc_unwise
theory:
    classy:
        extra_args:
            output: lCl, tCl
        path: global
params:
    b1:
        prior:
            min: 0.
            max: 10.
        ref:
            min: 1.
            max: 4.
        proposal: 0.1
    s1:
        prior:
            min: 0.1
            max: 1.0
        proposal: 0.1
"""
    info = yaml_load(info_yaml)
    model = get_model(info)
    return model