Example #1
def _test_overhead_timing():
    # prints timing for simple Gaussian vanilla mcmc
    import pstats
    from cProfile import Profile
    from io import StringIO
    # noinspection PyUnresolvedReferences
    from cobaya.samplers.mcmc import proposal  # trigger one-time numba compilation outside the profiled section

    LikeTest = _make_gaussian_like(15)
    info = {
        'likelihood': {
            'like': LikeTest
        },
        'debug': False,
        'sampler': {
            'mcmc': {
                'max_samples': 1000,
                'burn_in': 0,
                "learn_proposal": False,
                "Rminus1_stop": 0.0001
            }
        }
    }
    prof = Profile()
    prof.enable()
    run(info)
    prof.disable()
    s = StringIO()
    ps = pstats.Stats(prof, stream=s)
    ps.strip_dirs()
    ps.sort_stats('time')
    ps.print_stats(10)
    ps.sort_stats('cumtime')
    ps.print_stats(10)
    print(s.getvalue())
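The factory `_make_gaussian_like` is not shown in this listing. As context only, here is a minimal sketch of what such a helper could look like, assuming cobaya's external `Likelihood` class interface and a `params` class attribute to declare the sampled parameters (names, priors and proposal widths below are illustrative, not the original's):

import numpy as np
from cobaya.likelihood import Likelihood


def _make_gaussian_like(n_dim):
    # Hypothetical factory: an n-dimensional unit Gaussian likelihood class for cobaya.
    class GaussianLike(Likelihood):
        # Sampled parameters declared on the class, so the run info above needs no "params" block.
        params = {f"x{i}": {"prior": {"min": -4, "max": 4}, "proposal": 0.2}
                  for i in range(n_dim)}

        def logp(self, **params_values):
            x = np.array([params_values[f"x{i}"] for i in range(n_dim)])
            # Standard normal in every dimension, centred at the origin.
            return -0.5 * float(x @ x)

    return GaussianLike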
Example #2
def test_prior_inherit_samegiven_differentdefinition():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common["likelihood"])[0]
    default_info = get_default_info(likname, "likelihood")
    name, prior = deepcopy(default_info["prior"]).popitem()
    test_info["prior"] = {name: "this is not a prior"}
    with pytest.raises(LoggedError):
        run(test_info)
Example #3
def test_polychord_resume(packages_path, skip_not_installed, tmpdir):
    """
    Tests correct resuming of a run, in particular preservation of the original blocking.

    To test preservation of the oversampling+blocking, we try to confuse the sampler by
    requesting speed-measuring at resume time and providing speeds very different from
    the real ones.
    """
    nlive = 10
    max_ndead = 2 * nlive
    dead_points = []

    def callback(sampler):
        nonlocal dead_points
        dead_points = sampler.dead[["a", "b"]].values.copy()

    info = {
        "likelihood": {
            "A": {
                "external": "lambda a: stats.norm.logpdf(a)",
                "speed": 1
            },
            "B": {
                "external": "lambda b: stats.norm.logpdf(b)",
                "speed": 0.01
            }
        },
        "params": {
            "a": {
                "prior": {
                    "min": 0,
                    "max": 1
                }
            },
            "b": {
                "prior": {
                    "min": 0,
                    "max": 1
                }
            }
        },
        "sampler": {
            "polychord": {
                "measure_speeds": True,
                "nlive": nlive,
                "max_ndead": max_ndead,
                "callback_function": callback,
            }
        },
        "output": str(tmpdir)
    }
    install_test_wrapper(skip_not_installed, run, info)
    old_dead_points = dead_points.copy()
    info["resume"] = True
    run(info)
    assert np.allclose(old_dead_points, dead_points)
Example #4
def test_post_params():
    # Tests:
    # - removed dynamical derived parameter "a_plus_b"; added "a_minus_b"
    # - added dynamical derived parameter that depends on the *new* chi2__target
    # - added new fixed input "c" + new derived-from-external-function "cprime"
    # Generate original chain
    info = {
        "params": info_params, "sampler": info_sampler_dummy,
        "likelihood": {"gaussian": sampled_pdf}}
    updated_info_gaussian, sampler_gaussian = run(info)
    products_gaussian = sampler_gaussian.products()
    info_post = {
        "post": {"suffix": "foo",
                 "remove": {"params": {"a_plus_b": None}},
                 "add": {
                     "likelihood": {
                         "target": {"external": target_pdf, "output_params": ["cprime"]}},
                     "params": {
                         "c": 1.234,
                         "a_minus_b": {"derived": "lambda a,b: a-b"},
                         "my_chi2__target": {
                             "derived": "lambda chi2__target: chi2__target"},
                         "cprime": None}}}}
    info_post.update(updated_info_gaussian)
    products = post(info_post, products_gaussian["sample"]).products
    # Compare parameters
    assert np.allclose(
        products["sample"]["a"] - products["sample"]["b"],
        products["sample"]["a_minus_b"])
    assert np.allclose(
        products["sample"]["cprime"], info_post["post"]["add"]["params"]["c"])
    assert np.allclose(
        products["sample"]["my_chi2__target"], products["sample"]["chi2__target"])
Example #5
def test_post_prior(tmpdir):
    # Generate original chain
    info: InputDict = {
        "output": os.path.join(tmpdir, "gaussian"), "force": True,
        "params": info_params, "sampler": info_sampler,
        "likelihood": {"one": None}, "prior": {"gaussian": sampled_pdf}}
    info_post: InputDict = {
        "output": info["output"], "force": True,
        "post": {"suffix": "foo", 'skip': 0.1,
                 "remove": {"prior": {"gaussian": None}},
                 "add": {"prior": {"target": target_pdf_prior}}}}
    _, sampler = run(info)
    if mpi.is_main_process():
        mcsamples_in = loadMCSamples(info["output"], settings={'ignore_rows': 0.1})
        target_mean, target_cov = mpi.share(_get_targets(mcsamples_in))
    else:
        target_mean, target_cov = mpi.share()

    for mem in [False, True]:
        post(info_post, sample=sampler.products()["sample"] if mem else None)
        # Load with GetDist and compare
        if mpi.is_main_process():
            mcsamples = loadMCSamples(
                info_post["output"] + _post_ + info_post["post"]["suffix"])
            new_mean = mcsamples.mean(["a", "b"])
            new_cov = mcsamples.getCovMat().matrix
            mpi.share((new_mean, new_cov))
        else:
            new_mean, new_cov = mpi.share()
        assert np.allclose(new_mean, target_mean)
        assert np.allclose(new_cov, target_cov)
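The `_get_targets` helper is not shown here. Judging from the comparison at the end of the test, it presumably extracts a reference mean and covariance from the input getdist `MCSamples`; a hedged sketch (parameter names assumed):

def _get_targets(mcsamples):
    # Reference statistics of the original chain, used to validate the reweighted one.
    target_mean = mcsamples.mean(["a", "b"])
    target_cov = mcsamples.getCovMat().matrix
    return target_mean, target_cov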
Example #6
def body_of_test(packages_path, like_name=None, like_info=None):
    info = {
        _packages_path: process_packages_path(packages_path),
        kinds.sampler: {
            "evaluate": None
        }
    }
    if like_name:
        info[kinds.likelihood] = {like_name: None}
    elif like_info:
        info[kinds.likelihood] = like_info
        like_name = list(like_info)[0]
    info[_params] = {"H0": fiducial_H0}
    updated_info, sampler = run(info)
    products = sampler.products()
    # The default values for .get are for the _docs_ test
    mean = updated_info[kinds.likelihood][like_name].get(
        "H0_mean", fiducial_H0)
    std = updated_info[kinds.likelihood][like_name].get(
        "H0_std", fiducial_H0_std)
    reference_value = -2 * norm.logpdf(fiducial_H0, loc=mean, scale=std)
    computed_value = (
        products["sample"]["chi2__" +
                           list(info[kinds.likelihood])[0]].values[0])
    assert np.allclose(computed_value, reference_value)
Example #7
def test_minimize_gaussian():
    # parameters
    dimension = 3
    n_modes = 1
    # MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    # Info of likelihood and prior
    ranges = np.array([[0, 1] for _ in range(dimension)])
    prefix = "a_"
    info = info_random_gaussian_mixture(ranges=ranges,
                                        n_modes=n_modes,
                                        input_params_prefix=prefix,
                                        derived=True)
    mean = info[kinds.likelihood]["gaussian_mixture"]["means"][0]
    cov = info[kinds.likelihood]["gaussian_mixture"]["covs"][0]
    maxloglik = multivariate_normal.logpdf(mean, mean=mean, cov=cov)
    if rank == 0:
        print("Maximum of the gaussian mode to be found:")
        print(mean)
    info[kinds.sampler] = {"minimize": {"ignore_prior": True}}
    info["debug"] = False
    info["debug_file"] = None
    #    info["output_prefix"] = "./minigauss/"
    from cobaya.run import run
    updated_info, sampler = run(info)
    products = sampler.products()
    # Done! --> Tests
    if rank == 0:
        rel_error = (abs(maxloglik + products["minimum"]["minuslogpost"])
                     / abs(maxloglik))
        assert rel_error < 0.001
Example #8
def body_of_test(modules, lik_name, theory):
    if theory in skip_theories:
        print('Skipping test with %s' % theory)
        return
    info = {
        _path_install: process_modules_path(modules),
        _theory: {
            theory: None
        },
        _sampler: {
            "evaluate": None
        }
    }
    if lik_name.startswith("lambda"):
        line = copy(lik_name)
        lik_name = "whatever"
        info[_likelihood] = {lik_name: line}
    else:
        info[_likelihood] = {lik_name: None}
    info[_params] = {"H0": fiducial_H0}
    updated_info, products = run(info)
    # The default values for .get are for the _docs_ test
    mean = updated_info[_likelihood][lik_name].get("H0", fiducial_H0)
    std = updated_info[_likelihood][lik_name].get("H0_std", 1)
    reference_value = -2 * norm.logpdf(fiducial_H0, loc=mean, scale=std)
    computed_value = (
        products["sample"]["chi2__" +
                           list(info[_likelihood].keys())[0]].values[0])
    assert np.allclose(computed_value, reference_value)
Example #9
def test_parameterization():
    updated_info, sampler = run(info)
    products = sampler.products()
    sample = products["sample"]
    from getdist.mcsamples import MCSamplesFromCobaya
    gdsample = MCSamplesFromCobaya(updated_info, products["sample"])
    for i, point in sample:
        a = info[_params]["a"]
        b = get_external_function(info[_params]["b"])(a, point["bprime"])
        c = get_external_function(info[_params]["c"])(a, point["cprime"])
        e = get_external_function(e_func)(b)
        f = get_external_function(f_func)(b)
        g = get_external_function(info[_params]["g"]["derived"])(x_func(
            point["c"]))
        h = get_external_function(info[_params]["h"])(info[_params]["i"])
        j = get_external_function(info[_params]["j"])(b)
        k = get_external_function(info[_params]["k"]["derived"])(f)
        assert np.allclose(point[["b", "c", "e", "f", "g", "h", "j", "k"]],
                           [b, c, e, f, g, h, j, k])
        # Test for GetDist too (except fixed ones, ignored by GetDist)
        bcefgjk_getdist = [
            gdsample.samples[i][gdsample.paramNames.list().index(p)]
            for p in ["b", "c", "e", "f", "g", "j", "k"]
        ]
        assert np.allclose(bcefgjk_getdist, [b, c, e, f, g, j, k])
Example #10
def test_prior_inherit_samegiven_differentdefinition():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common[kinds.likelihood])[0]
    default_info = get_default_info(likname, kinds.likelihood)
    name, prior = deepcopy(default_info[_prior]).popitem()
    test_info[_prior] = {name: "this is not a prior"}
    with pytest.raises(LoggedError):
        updated_info, products = run(test_info)
Example #11
def test_prior_inherit_samegiven():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common[_likelihood].keys())[0]
    default_info = get_default_info(likname, _likelihood)
    name, prior = deepcopy(default_info[_prior]).popitem()
    test_info[_prior] = {name: prior}
    updated_info, products = run(test_info)
    assert updated_info[_prior] == default_info[_prior]
Example #12
def test_prior_inherit_differentgiven():
    test_info = deepcopy(test_info_common)
    test_info[_prior] = {"third": "lambda a1: 1"}
    updated_info, products = run(test_info)
    likname = list(test_info_common[_likelihood].keys())[0]
    default_info = get_default_info(likname, _likelihood)
    default_info[_prior].update(test_info[_prior])
    assert updated_info[_prior] == default_info[_prior]
Example #13
def test_prior_inherit_samegiven():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common["likelihood"])[0]
    default_info = get_default_info(likname, "likelihood")
    name, prior = deepcopy(default_info["prior"]).popitem()
    test_info["prior"] = {name: prior}
    updated_info, _ = run(test_info)
    assert updated_info["prior"] == default_info["prior"]
Example #14
def test_prior_inherit_samegiven_differentdefinition():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common[_likelihood].keys())[0]
    default_info = get_default_info(likname, _likelihood)
    name, prior = deepcopy(default_info[_prior]).popitem()
    test_info[_prior] = {name: "this is not a prior"}
    with pytest.raises(HandledException):
        updated_info, products = run(test_info)
Example #15
def test_prior_inherit_differentgiven():
    test_info = deepcopy(test_info_common)
    test_info["prior"] = {"third": "lambda a1: 1"}
    updated_info, _ = run(test_info)
    likname = list(test_info_common["likelihood"])[0]
    default_info = get_default_info(likname, "likelihood")
    default_info["prior"].update(test_info["prior"])
    assert updated_info["prior"] == default_info["prior"]
Example #16
def test_mcmc_fastdragging():
    """
    In 2D, tests a slow parameter "a0" against a fast one "a1".
    Both share a Gaussian likelihood with a high correlation.
    "a0" is made slower by a mock unit likelihood.
    Both likelihoods have mock derived parameters,
    just for testing that they are tracked correctly along the dragging.
    """
    info = {
        _params:
        odict([["a0", {
            "prior": {
                "min": -4,
                "max": 4
            },
            "ref": 0
        }], ["a1", {
            "prior": {
                "min": -4,
                "max": 4
            },
            "ref": 0
        }], ["b0", None], ["b1", None]]),
        _likelihood: {
            "slow": {
                "external":
                "import_module('test_mcmc_fastdragging').loglik_slow",
                "speed": 0.25
            },
            "correlation": {
                "external":
                "import_module('test_mcmc_fastdragging').loglik_correlation",
                "speed": 1
            },
            "derived": {
                "external":
                "import_module('test_mcmc_fastdragging').loglik_derived",
                "speed": 1
            }
        },
        _sampler: {
            "mcmc": {
                "max_samples": np.inf,
                "burn_in": 0,
                "Rminus1_stop": 0.01,
                "Rminus1_cl_stop": 0.01,
                "learn_proposal": True,
                "covmat": np.eye(2),
                "covmat_params": ["a0", "a1"],
                "drag_nfast_times": 5,
                "max_speed_slow": 0.5
            }
        }
    }
    info["debug"] = False
    info["debug_file"] = None
    info["output_prefix"] = "chains_test/"
    updated_info, products = run(info)
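The three external likelihoods above are looked up by name in the test module itself via `import_module('test_mcmc_fastdragging')`. A simplified, hypothetical sketch of what they could look like; the real ones also return the mock derived parameters "b0" and "b1" mentioned in the docstring, which are omitted here:

from scipy import stats

def loglik_slow(a0):
    # Unit likelihood whose only role is to make "a0" the slow parameter.
    return 0.0

def loglik_correlation(a0, a1):
    # Strongly correlated 2D Gaussian shared by the slow and fast directions.
    return stats.multivariate_normal.logpdf(
        [a0, a1], mean=[0.0, 0.0], cov=[[1.0, 0.95], [0.95, 1.0]])

def loglik_derived(a1):
    # Placeholder: in the real test this likelihood only provides derived parameters.
    return 0.0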
Example #17
def body_of_test(dim, tmpdir=None):
    mindim = 4
    assert dim > mindim, "Needs dimension>%d for the test." % mindim
    initial_random_covmat = random_cov(dim * [[0, 1]])
    i_s = list(range(dim))
    shuffle(i_s)
    n_altered = int(dim / 4)
    i_proposal = i_s[:n_altered]
    i_ref = i_s[n_altered:2 * n_altered]
    i_prior = i_s[2 * n_altered:3 * n_altered]
    removed = list(chain(*(i_proposal, i_ref, i_prior)))
    i_covmat = [i for i in range(dim) if i not in removed]
    for i in removed:
        diag = initial_random_covmat[i, i]
        initial_random_covmat[:, i] = 0
        initial_random_covmat[i, :] = 0
        initial_random_covmat[i, i] = diag
    # Prepare info, including refs, priors and reduced covmat
    prefix = "a_"
    input_order = list(range(dim))
    shuffle(input_order)
    info = {_likelihood: {"one": {"prefix": ""}}, _params: odict()}
    for i in input_order:
        p = prefix + str(i)
        info[_params][p] = {_prior: {_p_dist: "norm", "loc": 0, "scale": 1000}}
        sigma = np.sqrt(initial_random_covmat[i, i])
        if i in i_proposal:
            info[_params][p][_p_proposal] = sigma
        elif i in i_ref:
            info[_params][p][_p_ref] = {
                _p_dist: "norm",
                "scale": sigma
            }
        elif i in i_prior:
            info[_params][p][_prior]["scale"] = sigma
    reduced_covmat = initial_random_covmat[np.ix_(i_covmat, i_covmat)]
    reduced_covmat_params = [prefix + str(i) for i in i_covmat]
    info[_sampler] = {"mcmc": {}}
    if tmpdir:
        filename = os.path.join(str(tmpdir), "mycovmat.dat")
        header = " ".join(reduced_covmat_params)
        np.savetxt(filename, reduced_covmat, header=header)
        info[_sampler]["mcmc"]["covmat"] = str(filename)
    else:
        info[_sampler]["mcmc"]["covmat_params"] = reduced_covmat_params
        info[_sampler]["mcmc"]["covmat"] = reduced_covmat
    to_compare = initial_random_covmat[np.ix_(input_order, input_order)]

    def callback(sampler):
        assert np.allclose(to_compare, sampler.proposer.get_covariance())

    info[_sampler]["mcmc"].update({
        "callback_function": callback,
        "callback_every": 1,
        "max_samples": 1,
        "burn_in": 0
    })
    updated_info, products = run(info)
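`random_cov` is imported from elsewhere (cobaya ships a similar helper for its Gaussian-mixture likelihood); only the call pattern `random_cov(dim * [[0, 1]])` is visible here. A hedged stand-in that returns a random positive-definite covariance with scales tied to the given ranges:

import numpy as np

def random_cov(ranges, std_fraction=0.1, seed=None):
    # Hypothetical stand-in, not cobaya's implementation.
    rng = np.random.default_rng(seed)
    ranges = np.atleast_2d(ranges)
    dim = len(ranges)
    # Standard deviations as a (randomized) fraction of each parameter's range width.
    stds = std_fraction * (ranges[:, 1] - ranges[:, 0]) * rng.uniform(0.5, 1.5, dim)
    # A random orthogonal rotation of a diagonal covariance stays symmetric positive-definite.
    rotation, _ = np.linalg.qr(rng.standard_normal((dim, dim)))
    return rotation @ np.diag(stds ** 2) @ rotation.T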
Example #18
def test_external():

    os.chdir(base_dir)
    mapping_proj = ['ell_0', 'ell_2', 'ell_4']
    make_data_covariance(data_fn=data_fn,
                         covariance_fn=covariance_fn,
                         mapping_proj=mapping_proj)
    info = yaml_load_file('./test_cobaya.yaml')
    updated_info, sampler = run(info)
    assert 'a' in updated_info['params']
    assert 'sample' in sampler.products()
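The file `./test_cobaya.yaml` belongs to this external likelihood's repository and is not reproduced in this listing. Purely for orientation, a minimal cobaya input of the same general shape can be written inline; everything below is illustrative, not the contents of the actual file:

from cobaya.yaml import yaml_load

info_sketch = yaml_load("""
likelihood:
  gaussian: "lambda a: -0.5 * a ** 2"
params:
  a:
    prior: {min: -1, max: 1}
sampler:
  mcmc:
    max_samples: 100
""")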
Example #19
def test_parametrization():
    updated_info, products = run(info)
    sample = products["sample"]
    for i, point in sample:
        a = info[_params]["a"]
        b = get_external_function(info[_params]["b"])(a, point["bprime"])
        d = get_external_function(d_func)(a)
        e = get_external_function(e_func)(b)
        f = get_external_function(info[_params]["f"]["derived"])(x_func(
            point["c"]))
        assert np.allclose(point[[_derived_pre + p for p in ["d", "e", "f"]]],
                           [d, e, f])
Example #20
def test_post_prior(tmpdir):
    # Generate original chain
    info = {
        _output_prefix: os.path.join(str(tmpdir), "gaussian"),
        _force: True,
        _params: info_params,
        kinds.sampler: info_sampler,
        kinds.likelihood: {
            "one": None
        },
        _prior: {
            "gaussian": sampled_pdf
        }
    }
    run(info)
    info_post = {
        _output_prefix: info[_output_prefix],
        _force: True,
        _post: {
            _post_suffix: "foo",
            _post_remove: {
                _prior: {
                    "gaussian": None
                }
            },
            _post_add: {
                _prior: {
                    "target": target_pdf_prior
                }
            }
        }
    }
    post(info_post)
    # Load with GetDist and compare
    mcsamples = loadMCSamples(info_post[_output_prefix] + _post_ +
                              info_post[_post][_post_suffix])
    new_mean = mcsamples.mean(["a", "b"])
    new_cov = mcsamples.getCovMat().matrix
    assert abs(KL_norm(target["mean"], target["cov"], new_mean,
                       new_cov)) < 0.02
Example #21
def test_post_prior(tmpdir):
    # Generate original chain
    info = {
        "output": os.path.join(str(tmpdir), "gaussian"),
        "force": True,
        "params": info_params,
        "sampler": info_sampler,
        "likelihood": {
            "one": None
        },
        "prior": {
            "gaussian": sampled_pdf
        }
    }
    run(info)
    info_post = {
        "output": info["output"],
        "force": True,
        "post": {
            "suffix": "foo",
            "remove": {
                "prior": {
                    "gaussian": None
                }
            },
            "add": {
                "prior": {
                    "target": target_pdf
                }
            }
        }
    }
    post(info_post)
    # Load with GetDist and compare
    mcsamples = loadMCSamples(info_post["output"] + "_post_" +
                              info_post["post"]["suffix"])
    new_mean = mcsamples.mean(["a", "b"])
    new_cov = mcsamples.getCovMat().matrix
    assert abs(KL_norm(target["mean"], target["cov"], new_mean,
                       new_cov)) < 0.02
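`KL_norm` is the test suite's helper for the Kullback-Leibler divergence between two Gaussians; only its call pattern `KL_norm(mean1, cov1, mean2, cov2)` is visible in these examples. A self-contained sketch of what such a helper computes, KL(N(m1, S1) || N(m2, S2)):

import numpy as np

def KL_norm(m1, S1, m2, S2):
    # KL divergence between two multivariate Gaussians (sketch of the assumed helper).
    m1, m2 = np.atleast_1d(m1), np.atleast_1d(m2)
    S1, S2 = np.atleast_2d(S1), np.atleast_2d(S2)
    dim = len(m1)
    S2inv = np.linalg.inv(S2)
    dm = m2 - m1
    return 0.5 * (np.trace(S2inv @ S1) + dm @ S2inv @ dm - dim
                  + np.log(np.linalg.det(S2) / np.linalg.det(S1)))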
Example #22
def test_inherit_label_and_bounds():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common[_likelihood].keys())[0]
    default_info_params = get_default_info(likname, _likelihood)[_params]
    test_info[_params] = deepcopy(default_info_params)
    test_info[_params]["a1"].pop(_p_label, None)
    # First, change one limit (so no inheritance at all)
    test_info[_params]["b1"].pop("min")
    new_max = 2
    test_info[_params]["b1"]["max"] = new_max
    updated_info, products = run(test_info)
    assert updated_info[_params]["a1"] == default_info_params["a1"]
    assert updated_info[_params]["b1"].get("min") is None
    assert updated_info[_params]["b1"]["max"] == new_max
    # Second, remove limits, so they are inherited
    test_info = deepcopy(test_info_common)
    test_info[_params] = deepcopy(default_info_params)
    test_info[_params]["b1"].pop("min")
    test_info[_params]["b1"].pop("max")
    updated_info, products = run(test_info)
    assert updated_info[_params]["b1"]["min"] == default_info_params["b1"]["min"]
    assert updated_info[_params]["b1"]["max"] == default_info_params["b1"]["max"]
Example #23
def test_post_params():
    # Tests:
    # - removed dynamical derived parameter "a_plus_b"; added "a_minus_b"
    # - added dynamical derived parameter that depends on the *new* chi2__target
    # - added new fixed input "c" + new derived-from-external-function "cprime"
    # Generate original chain
    info = {
        _params: info_params,
        _sampler: info_sampler_dummy,
        _likelihood: {
            "gaussian": sampled_pdf
        }
    }
    updated_info_gaussian, products_gaussian = run(info)
    info_post = {
        _post: {
            _post_suffix: "foo",
            _post_remove: {
                _params: {
                    "a_plus_b": None
                }
            },
            _post_add: {
                _likelihood: {
                    "target": target_pdf
                },
                _params: {
                    "c": 1.234,
                    "a_minus_b": {
                        _p_derived: "lambda a,b: a-b"
                    },
                    "my_chi2__target": {
                        _p_derived: "lambda chi2__target: chi2__target"
                    },
                    "cprime": None
                }
            }
        }
    }
    info_post.update(updated_info_gaussian)
    updated_info, products = post(info_post, products_gaussian["sample"])
    # Compare parameters
    assert np.allclose(products["sample"]["a"] - products["sample"]["b"],
                       products["sample"]["a_minus_b"])
    assert np.allclose(products["sample"]["cprime"],
                       info_post[_post][_post_add][_params]["c"])
    assert np.allclose(products["sample"]["my_chi2__target"],
                       products["sample"]["chi2__target"])
Example #24
def test_inherit_label_and_bounds():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common["likelihood"])[0]
    default_info_params = get_default_info(likname, "likelihood")["params"]
    test_info["params"] = deepcopy(default_info_params)
    test_info["params"]["a1"].pop("latex", None)
    # Remove limits, so they are inherited
    test_info = deepcopy(test_info_common)
    test_info["params"] = deepcopy(default_info_params)
    test_info["params"]["b1"].pop("min")
    test_info["params"]["b1"].pop("max")
    updated_info, _ = run(test_info)
    assert updated_info["params"]["b1"]["min"] == default_info_params["b1"][
        "min"]
    assert updated_info["params"]["b1"]["max"] == default_info_params["b1"][
        "max"]
Example #25
def test_inherit_label_and_bounds():
    test_info = deepcopy(test_info_common)
    likname = list(test_info_common[kinds.likelihood])[0]
    default_info_params = get_default_info(likname, kinds.likelihood)[_params]
    test_info[_params] = deepcopy(default_info_params)
    test_info[_params]["a1"].pop(partag.latex, None)
    # Remove limits, so they are inherited
    test_info = deepcopy(test_info_common)
    test_info[_params] = deepcopy(default_info_params)
    test_info[_params]["b1"].pop("min")
    test_info[_params]["b1"].pop("max")
    updated_info, products = run(test_info)
    assert updated_info[_params]["b1"]["min"] == default_info_params["b1"][
        "min"]
    assert updated_info[_params]["b1"]["max"] == default_info_params["b1"][
        "max"]
Example #26
def test_parameterization():
    updated_info, products = run(info)
    sample = products["sample"]
    from getdist.mcsamples import loadCobayaSamples
    gdsample = loadCobayaSamples(updated_info, products["sample"])
    for i, point in sample:
        a = info[_params]["a"]
        b = get_external_function(info[_params]["b"])(a, point["bprime"])
        c = get_external_function(info[_params]["c"])(a, point["cprime"])
        e = get_external_function(e_func)(b)
        f = get_external_function(f_func)(b)
        g = get_external_function(info[_params]["g"]["derived"])(x_func(
            point["c"]))
        assert np.allclose(point[["b", "c", "e", "f", "g"]], [b, c, e, f, g])
        bcefg_getdist = [
            gdsample.samples[i][gdsample.paramNames.list().index(p)]
            for p in ["b", "c", "e", "f", "g"]
        ]
        assert np.allclose(bcefg_getdist, [b, c, e, f, g])
Example #27
def sampling(setup):
    """
    Sample CMB power spectra over cosmological parameters with `cobaya`,
    using either minimization algorithms or MCMC methods.
    """

    # Get experiment setup
    experiment = setup["experiment"]
    lmin, lmax = experiment["lmin"], experiment["lmax"]

    simu = setup["simulation"]
    Dl, cov = simu["Dl"], simu["covmat"]

    # Chi2 for CMB spectra sampling
    def chi2(_theory={"cl": {"tt": lmax}}):
        Dl_theo = _theory.get_cl(ell_factor=True)["tt"][lmin:lmax]
        chi2 = np.sum((Dl - Dl_theo)**2 / cov)
        return -0.5 * chi2

    # Chi2 for CMB spectra residuals sampling
    from beyondCV import utils
    Dl_Planck = utils.get_theory_cls(setup, lmax)[lmin:lmax]

    def chi2_residuals(_theory={"cl": {"tt": lmax}}):
        Dl_theo = _theory.get_cl(ell_factor=True)["tt"][lmin:lmax]
        Delta_Dl_obs, Delta_Dl_theo = Dl, Dl_theo - Dl_Planck
        chi2 = np.sum((Delta_Dl_obs - Delta_Dl_theo)**2 / cov)
        return -0.5 * chi2

    # Get cobaya setup
    info = setup["cobaya"]

    # Add likelihood function
    survey = experiment.get("survey")
    if survey in ["SOxSO", "SOxP", "PxP"]:
        info["likelihood"] = {"chi2": chi2}
    else:
        info["likelihood"] = {"chi2": chi2_residuals}

    from cobaya.run import run
    return run(info)
Example #28
def body_of_test(modules, data, theory):
    assert modules, "I need a modules folder!"
    info = {
        _path_install: modules,
        _theory: {
            theory: None
        },
        _sampler: {
            "evaluate": None
        }
    }
    lik_name = "hst_" + data
    info[_likelihood] = {lik_name: None}
    fiducial_H0 = 70
    info[_params] = {"H0": fiducial_H0}
    updated_info, products = run(info)
    mean = updated_info[_likelihood][lik_name]["H0"]
    std = updated_info[_likelihood][lik_name]["H0_err"]
    reference_value = -2 * norm.logpdf(fiducial_H0, loc=mean, scale=std)
    computed_value = (
        products["sample"]["chi2__" +
                           list(info[_likelihood].keys())[0]].values[0])
    assert np.allclose(computed_value, reference_value)
Example #29
def body_of_test(modules, data, theory):
    assert modules, "I need a modules folder!"
    info = {_path_install: modules,
            _theory: {theory: None},
            _sampler: {"evaluate": None}}
    info[_likelihood] = {"des_1yr_"+data: None}
    info[_params] = {
        _theory: {
            "H0": 68.81,
            "ombh2": 0.0468 * 0.6881 ** 2,
            "omch2": (0.295 - 0.0468) * 0.6881 ** 2 - 0.0006155,
            "YHe": 0.245341,
            "tau": 0.08,
            "As": 2.260574e-09,
            "ns": 0.9676
        }}
    if data in ["shear", "galaxy_galaxylensing"]:
        info[_params].update(yaml_load(test_params_shear)[_params])
    if data in ["clustering", "galaxy_galaxylensing"]:
        info[_params].update(yaml_load(test_params_clustering)[_params])

    # UPDATE WITH BOTH ANYWAY FOR NOW!!!!!
    info[_params].update(yaml_load(test_params_shear)[_params])
    info[_params].update(yaml_load(test_params_clustering)[_params])

    reference_value = 650.872548
    abs_tolerance = 0.1
    if theory == "classy":
        info[_params][_theory].update(baseline_cosmology_classy_extra)
        abs_tolerance += 2
        print("WE SHOULD NOT HAVE TO LOWER THE TOLERANCE THAT MUCH!!!")
    updated_info, products = run(info)
    # print products["sample"]
    computed_value = products["sample"]["chi2__"+list(info[_likelihood].keys())[0]].values[0]
    assert abs(computed_value - reference_value) < abs_tolerance
Example #30
def body_of_test(dimension=1,
                 n_modes=1,
                 info_sampler={},
                 tmpdir="",
                 modules=None):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    # Info of likelihood and prior
    ranges = np.array([[-1, 1] for _ in range(dimension)])
    while True:
        info = info_random_gaussian_mixture(ranges=ranges,
                                            n_modes=n_modes,
                                            prefix="a_",
                                            O_std_min=O_std_min,
                                            O_std_max=O_std_max,
                                            derived=True)
        if n_modes == 1:
            break
        means = info["likelihood"]["gaussian_mixture"]["means"]
        distances = chain(*[[np.linalg.norm(m1 - m2) for m2 in means[i + 1:]]
                            for i, m1 in enumerate(means)])
        if min(distances) >= distance_factor * O_std_max:
            break
    if rank == 0:
        print("Original mean of the gaussian mode:")
        print(info["likelihood"]["gaussian_mixture"]["means"])
        print("Original covmat of the gaussian mode:")
        print(info["likelihood"]["gaussian_mixture"]["covs"])
    info[_sampler] = info_sampler
    if list(info_sampler.keys())[0] == "mcmc":
        if "covmat" in info_sampler["mcmc"]:
            info[_sampler]["mcmc"]["covmat_params"] = (list(
                info["params"].keys())[:dimension])
    info[_debug] = False
    info[_debug_file] = None
    info[_output_prefix] = getattr(tmpdir, "realpath", lambda: tmpdir)()
    if modules:
        info[_path_install] = process_modules_path(modules)
    # Delay to one chain to check that MPI communication of the sampler is non-blocking
    #    if rank == 1:
    #        info["likelihood"]["gaussian_mixture"]["delay"] = 0.1
    updated_info, products = run(info)
    # Done! --> Tests
    if rank == 0:
        if list(info_sampler.keys())[0] == "mcmc":
            ignore_rows = 0.5
        else:
            ignore_rows = 0
        results = loadCobayaSamples(updated_info,
                                    products["sample"],
                                    ignore_rows=ignore_rows,
                                    name_tag="sample")
        clusters = None
        if "clusters" in products:
            clusters = [
                loadCobayaSamples(updated_info,
                                  products["clusters"][i]["sample"],
                                  name_tag="cluster %d" % (i + 1))
                for i in products["clusters"]
            ]
        # Plots!
        try:
            import getdist.plots as gdplots
            from getdist.gaussian_mixtures import MixtureND
            mixture = MixtureND(
                info[_likelihood]["gaussian_mixture"]["means"],
                info[_likelihood]["gaussian_mixture"]["covs"],
                names=[p for p in info[_params] if "deriv" not in p],
                label="truth")
            g = gdplots.getSubplotPlotter()
            to_plot = [mixture, results]
            if clusters:
                to_plot = to_plot + clusters
            g.triangle_plot(to_plot, )
            g.export("test.png")
        except Exception:
            print("Plotting failed!")
        # 1st test: KL divergence
        if n_modes == 1:
            cov_sample, mean_sample = results.getCov(), results.getMeans()
            KL_final = KL_norm(
                m1=info[_likelihood]["gaussian_mixture"]["means"][0],
                S1=info[_likelihood]["gaussian_mixture"]["covs"][0],
                m2=mean_sample[:dimension],
                S2=cov_sample[:dimension, :dimension])
            print("Final KL: ", KL_final)
            assert KL_final <= KL_tolerance
        # 2nd test: clusters
        else:
            if "clusters" in products:
                assert len(products["clusters"].keys()) >= n_modes, (
                    "Not all clusters detected!")
                for c2 in clusters:
                    cov_c2, mean_c2 = c2.getCov(), c2.getMeans()
                    KLs = [
                        KL_norm(m1=info[_likelihood]["gaussian_mixture"]
                                ["means"][i_c1],
                                S1=info[_likelihood]["gaussian_mixture"]
                                ["covs"][i_c1],
                                m2=mean_c2[:dimension],
                                S2=cov_c2[:dimension, :dimension])
                        for i_c1 in range(n_modes)
                    ]
                    extra_tol = 4 * n_modes if n_modes > 1 else 1
                    assert min(KLs) <= KL_tolerance * extra_tol
            else:
                assert 0, "Could not check sample convergence: multimodal but no clusters"
        # 3rd test: Evidence
        if "logZ" in products:
            logZprior = sum(np.log(ranges[:, 1] - ranges[:, 0]))
            assert (products["logZ"] - logZ_nsigmas * products["logZstd"] <
                    -logZprior <
                    products["logZ"] + logZ_nsigmas * products["logZstd"])