Code example #1
def test_minimize_gaussian(tmpdir):
    maxloglik = 0
    for method in reversed(valid_methods):
        NoisyCovLike.noise = 0.005 if method == 'bobyqa' else 0
        info: InputDict = {
            'likelihood': {
                'like': NoisyCovLike
            },
            "sampler": {
                "minimize": {
                    "ignore_prior": True,
                    "method": method
                }
            }
        }
        products = run(info).sampler.products()
        # the minimizer reports -log(posterior); with the prior ignored, compare the
        # recovered best log-likelihood against the known maximum, maxloglik
        error = abs(maxloglik - -products["minimum"]["minuslogpost"])
        assert error < 0.01

        info['output'] = os.path.join(tmpdir, 'testmin')
        products = run(info, force=True).sampler.products()
        if mpi.is_main_process():
            from getdist.types import BestFit
            res = BestFit(info['output'] + '.bestfit').getParamDict()
            assert np.isclose(res["loglike"],
                              products["minimum"]["minuslogpost"])
            for p, v in list(res.items())[:-2]:
                assert np.isclose(products["minimum"][p], v)
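
The function above relies on module-level context: imports, the NoisyCovLike likelihood and the list of minimizer methods. A minimal sketch of what that context could look like is given below; the parameter ranges, the method list and the class body are illustrative assumptions, not the test module's actual definitions.

import os

import numpy as np

from cobaya import mpi
from cobaya.likelihood import Likelihood
from cobaya.run import run
from cobaya.typing import InputDict

valid_methods = ('scipy', 'bobyqa')  # minimizer backends exercised by the loop


class NoisyCovLike(Likelihood):
    """Unit 2D Gaussian, optionally perturbed by a small noise term (a sketch)."""
    params = {'a': {'prior': {'min': -2, 'max': 2}, 'proposal': 0.2},
              'b': {'prior': {'min': -2, 'max': 2}, 'proposal': 0.2}}
    noise = 0

    def logp(self, **params_values):
        # maximum log-likelihood is 0 at a = b = 0, matching maxloglik above
        return (-0.5 * (params_values['a'] ** 2 + params_values['b'] ** 2)
                + self.noise * np.random.random_sample())
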
Code example #2
def test_mcmc_sync():
    info: InputDict = yaml_load(yaml)
    logger.info('Test end synchronization')

    if mpi.rank() == 1:
        max_samples = 200
    else:
        max_samples = 600
    # simulate an asynchronous end of the sampling loop (different max_samples per rank)
    info['sampler']['mcmc'] = {'max_samples': max_samples}

    updated_info, sampler = run(info)
    assert len(sampler.products()["sample"]) == max_samples

    logger.info('Test error synchronization')
    if mpi.rank() == 0:
        info['sampler']['mcmc'] = {'max_samples': 'none'}  # 'none' not valid
        with NoLogging(logging.ERROR), pytest.raises(TypeError):
            run(info)
    else:
        with pytest.raises(mpi.OtherProcessError):
            run(info)

    logger.info('Test one-process hang abort')

    aborted = False

    def test_abort():
        nonlocal aborted
        aborted = True

    # test error converted into MPI_ABORT after timeout
    # noinspection PyTypeChecker
    with pytest.raises((LoggedError, mpi.OtherProcessError)), \
            NoLogging(logging.ERROR):
        with mpi.ProcessState('test',
                              time_out_seconds=0.5,
                              timeout_abort_proc=test_abort):
            if mpi.rank() != 1:
                time.sleep(0.6)  # fake hang
            else:
                raise LoggedError(logger, 'Expected test error')
    if mpi.rank() == 1:
        assert aborted
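
The synchronization branches above only do something interesting when the test is launched under MPI with at least two processes, so that ranks 0 and 1 take different paths. Below is a sketch of the imports the snippet assumes; the yaml string holding the base mcmc configuration, and the exact logger name, are defined elsewhere in the test module.

import logging
import time

import pytest

from cobaya import mpi
from cobaya.log import LoggedError, NoLogging
from cobaya.run import run
from cobaya.typing import InputDict
from cobaya.yaml import yaml_load

logger = logging.getLogger('test_mcmc')  # logger name is an assumption
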
Code example #3
def test_run_minimize(tmpdir):
    NoisyCovLike.noise = 0
    info: InputDict = {
        'likelihood': {
            'like': NoisyCovLike
        },
        "sampler": {
            "mcmc": {
                "Rminus1_stop": 0.5,
                'Rminus1_cl_stop': 0.4,
                'seed': 2
            }
        },
        "output": os.path.join(tmpdir, 'testchain')
    }
    run(info, force=True)
    min_info: InputDict = dict(info, sampler={'minimize': None})
    output_info, sampler = run(min_info, force=True)
    assert (abs(sampler.products()["minimum"]["b"] - mean[1]) < 0.01)
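
Here mean is the true parameter mean defined next to NoisyCovLike elsewhere in the test module, and the minimize run reuses the same output prefix as the preceding chain, which allows the minimizer to make use of the existing chain products. As a hypothetical standalone variant (NoisyCovLike and run as sketched after code example #1), the minimizer can also be run without any preceding chain:

# Hypothetical standalone variant: minimize the same likelihood directly,
# with no preceding MCMC chain and no output files.
min_only = {'likelihood': {'like': NoisyCovLike},
            'sampler': {'minimize': {'ignore_prior': True}}}
_, min_sampler = run(min_only)
print(min_sampler.products()['minimum'])  # best-fit point and -log(posterior)
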
Code example #4
def _test_overhead_timing(dim=15):
    # prints timing for simple Gaussian vanilla mcmc
    import pstats
    from cProfile import Profile
    from io import StringIO
    # import the proposal module up front so its one-time numba compilation
    # stays outside the profiled region
    # noinspection PyUnresolvedReferences
    from cobaya.samplers.mcmc import proposal

    like_test = _make_gaussian_like(dim)
    info: InputDict = {
        'likelihood': {
            'like': like_test
        },
        'debug': False,
        'sampler': {
            'mcmc': {
                'max_samples': 1000,
                'burn_in': 0,
                "learn_proposal": False,
                "Rminus1_stop": 0.0001
            }
        }
    }
    prof = Profile()
    prof.enable()
    run(info)
    prof.disable()
    # prof.dump_stats("out.prof")  # to visualize with e.g. snakeviz
    s = StringIO()
    ps = pstats.Stats(prof, stream=s)
    print_n_calls = 10
    ps.strip_dirs()
    ps.sort_stats('time')
    ps.print_stats(print_n_calls)
    ps.sort_stats('cumtime')
    ps.print_stats(print_n_calls)
    print(s.getvalue())
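
_make_gaussian_like is a helper defined elsewhere in the test module. One plausible stand-in is sketched below (hypothetical parameter names and ranges): it builds a dim-dimensional unit Gaussian as a Likelihood class that can be dropped into the 'like' slot above. With such a target, the very strict Rminus1_stop effectively ensures the profiled run is terminated by max_samples rather than by early convergence.

import numpy as np

from cobaya.likelihood import Likelihood


def _make_gaussian_like(dim):
    # hypothetical stand-in: unit Gaussian over sampled parameters x0 .. x{dim-1}
    class GaussLikeND(Likelihood):
        params = {f'x{i}': {'prior': {'min': -5, 'max': 5}, 'proposal': 0.5}
                  for i in range(dim)}

        def logp(self, **params_values):
            x = np.array([params_values[f'x{i}'] for i in range(dim)])
            return -0.5 * float(x @ x)

    return GaussLikeND
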
Code example #5
def test_cosmo_run_not_found():
    with NoLogging(logging.ERROR):
        inf = deepcopy_where_possible(info)
        inf["likelihood"]["H0.perfect"] = None
        with pytest.raises(ComponentNotFoundError):
            run(inf)
        inf = deepcopy_where_possible(info)
        inf["likelihood"]["none"] = None
        with pytest.raises(ComponentNotFoundError):
            run(inf)
        inf = deepcopy_where_possible(info)
        inf["likelihood"]["pandas.plotting.PlotAccessor"] = None
        with pytest.raises(LoggedError) as e:
            run(inf)
        assert "Failed to get defaults for component" in str(e)
Code example #6
def test_mcmc_drag_results():
    info: InputDict = yaml_load(yaml_drag)
    info['likelihood'] = {
        'g1': {
            'external': GaussLike
        },
        'g2': {
            'external': GaussLike2
        }
    }
    updated_info, sampler = run(info)
    products = sampler.products()
    from getdist.mcsamples import MCSamplesFromCobaya
    products["sample"] = mpi.allgather(products["sample"])
    gdample = MCSamplesFromCobaya(updated_info,
                                  products["sample"],
                                  ignore_rows=0.2)
    assert abs(gdample.mean('a') - 0.2) < 0.03
    assert abs(gdample.mean('b')) < 0.03
    assert abs(gdample.std('a') - 0.293) < 0.03
    assert abs(gdample.std('b') - 0.4) < 0.03
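
yaml_drag and the two external Gaussian likelihoods GaussLike and GaussLike2 are defined elsewhere in the test module; presumably the yaml enables the mcmc drag (fast-dragging) option and sets priors on a and b such that the combined posterior has the means and widths asserted above. Once the per-process chains are gathered with mpi.allgather, the combined getdist object supports the usual marginalized summaries, for example:

print(gdample.getMargeStats())      # marginalized means and credible limits
print(gdample.getInlineLatex('a'))  # one-line LaTeX summary for parameter 'a'
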
Code example #7
def test_cosmo_run_resume_post(tmpdir, packages_path=None):
    # only vary As, so fast chain. Chain does not need to converge (tested elsewhere).
    info['output'] = os.path.join(tmpdir, 'testchain')
    if packages_path:
        info["packages_path"] = process_packages_path(packages_path)
    run(info, force=True)
    # note that continuing from files means samples are read back at text-file precision,
    # so the SampleCollection returned from run contains a mix of precisions
    run(info, resume=True, override={'sampler': {'mcmc': {'Rminus1_stop': 0.2}}})
    updated_info, sampler = run(info['output'] + '.updated' + Extension.dill,
                                resume=True,
                                override={'sampler': {'mcmc': {'Rminus1_stop': 0.05}}})
    results = mpi.allgather(sampler.products()["sample"])
    samp = MCSamplesFromCobaya(updated_info, results, ignore_rows=0.2)
    assert np.isclose(samp.mean('As100'), 100 * samp.mean('As'))

    # post-processing
    info_post: PostDict = {'add': {'params': {'h': None},
                                   "likelihood": {"test_likelihood2": likelihood2}},
                           'remove': {'likelihood': ["test_likelihood"]},
                           'suffix': 'testpost',
                           'skip': 0.2, 'thin': 4
                           }

    output_info, products = run(updated_info, override={'post': info_post}, force=True)
    results2 = mpi.allgather(products["sample"])
    samp2 = MCSamplesFromCobaya(output_info, results2)
    samp_test = samp.copy()
    samp_test.weighted_thin(4)
    sigma8 = samp_test.getParams().sigma8
    samp_test.reweightAddingLogLikes(-(sigma8 - 0.7) ** 2 / 0.1 ** 2
                                     + (sigma8 - 0.75) ** 2 / 0.07 ** 2)
    assert np.isclose(samp_test.mean('sigma8'), samp2.mean('sigma8'))

    # from getdist-format chain files
    root = os.path.join(tmpdir, 'getdist_format')
    if mpi.is_main_process():
        samp.saveChainsAsText(root)
    mpi.sync_processes()

    from_txt = dict(updated_info, output=root)
    post_from_text = dict(info_post, skip=0)  # getdist already skipped
    output_info, products = run(from_txt, override={'post': post_from_text}, force=True)
    samp_getdist = MCSamplesFromCobaya(output_info, mpi.allgather(products["sample"]))
    assert not products["stats"]["points_removed"]
    assert samp2.numrows == samp_getdist.numrows
    assert np.isclose(samp2.mean('sigma8'), samp_getdist.mean('sigma8'))

    # again with inferred-inputs for params
    info_conv = cosmomc_root_to_cobaya_info_dict(root)
    # have to manually add consistent likelihoods if re-computing
    info_conv['likelihood'] = info['likelihood']
    info_conv['theory'] = info['theory']
    post_from_text = dict(info_post, skip=0, suffix='getdist2')  # getdist already skipped
    output_info, products = run(info_conv, override={'post': post_from_text},
                                output=False)
    samp_getdist2 = MCSamplesFromCobaya(output_info, mpi.allgather(products["sample"]))
    assert np.isclose(samp2.mean('sigma8'), samp_getdist2.mean('sigma8'))

    # from save info, no output
    info_post['output'] = None
    output_info, products = run({'output': info['output'], 'post': info_post}, force=True)
    results3 = mpi.allgather(products["sample"])
    samp3 = MCSamplesFromCobaya(output_info, results3)
    assert np.isclose(samp3.mean("sigma8"), samp2.mean("sigma8"))
    assert np.isclose(samp3.mean("joint"), samp2.mean("joint"))
    samps4 = loadMCSamples(info['output'] + '.post.testpost')
    assert np.isclose(samp3.mean("joint"), samps4.mean("joint"))

    # test recover original answer swapping likelihoods back
    info_revert = {'add': {'likelihood': info['likelihood']},
                   'remove': {'likelihood': ["test_likelihood2"]},
                   'suffix': 'revert',
                   'skip': 0, 'thin': 1,
                   'output': None
                   }
    output_info, products = run({'output': info['output'] + '.post.testpost',
                                 'post': info_revert}, force=True)
    results_revert = mpi.allgather(products["sample"])
    samp_revert = MCSamplesFromCobaya(output_info, results_revert)

    samp_thin = MCSamplesFromCobaya(updated_info, results, ignore_rows=0.2)
    samp_thin.weighted_thin(4)
    assert samp_thin.numrows == samp_revert.numrows + products["stats"]["points_removed"]
    if not products["stats"]["points_removed"]:
        assert np.isclose(samp_revert.mean("sigma8"), samp_thin.mean("sigma8"))
    else:
        assert abs(samp_revert.mean("sigma8") - samp_thin.mean("sigma8")) < 0.01
    assert not products["stats"]["points_removed"]

    # no remove
    info_post = {
        'add': {'params': {'h': None}, "likelihood": {"test_likelihood2": likelihood2}},
        'suffix': 'test2', 'skip': 0.2, 'thin': 4}
    output_info, products = run(updated_info, override={'post': info_post}, force=True)
    results2 = mpi.allgather(products["sample"])
    samp2 = MCSamplesFromCobaya(output_info, results2)
    assert "chi2__type1" in samp2.paramNames.list()
    # check what has been saved to disk is consistent
    samps4 = loadMCSamples(updated_info['output'] + '.post.test2')
    assert samp2.paramNames.list() == samps4.paramNames.list()
    assert np.isclose(samp2.mean("sigma8"), samps4.mean("sigma8"))

    # adding new theory derived
    info_post['add']['theory'] = {'new_param_theory': BTheory}
    output_info, products = run(updated_info, override={'post': info_post}, output=False)
    results3 = mpi.allgather(products["sample"])
    samp3 = MCSamplesFromCobaya(output_info, results3)
    assert np.isclose(samp3.mean("sigma8"), samp2.mean("sigma8"))
    assert np.isclose(samp3.mean("As1000"), samp2.mean("As") * 1000)

    info_post['add']['theory'] = {'new_param_theory': CTheory}
    with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
        run(updated_info, override={'post': info_post}, output=False)
    assert 'Parameter AsX no known value' in str(e)
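
This last example leans on a fair amount of module-level context: the shared info dict, the likelihood2 function and the BTheory/CTheory theory classes, plus several cobaya and getdist helpers. A sketch of the imports it assumes is given below; the commented-out paths are assumptions that may differ between cobaya versions.

import logging
import os

import numpy as np
import pytest
from getdist.mcsamples import MCSamplesFromCobaya, loadMCSamples

from cobaya import mpi
from cobaya.log import LoggedError, NoLogging
from cobaya.run import run
from cobaya.typing import PostDict
# assumed import paths, may vary between cobaya versions:
# from cobaya.conventions import Extension
# from cobaya.cosmo_input import cosmomc_root_to_cobaya_info_dict
# from cobaya.tools import process_packages_path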