Example #1
def _get_model_with_requirements_and_eval(theo, reqs, packages_path,
                                          skip_not_installed):
    planck_base_model_prime = deepcopy(planck_base_model)
    planck_base_model_prime["hubble"] = "H"  # intercompatibility CAMB/CLASS
    info_theory = {theo: {"extra_args": base_precision[theo]}}
    info = create_input(planck_names=True,
                        theory=theo,
                        **planck_base_model_prime)
    info = recursive_update(info, {
        "theory": info_theory,
        "likelihood": {
            "one": None
        }
    })
    info["packages_path"] = process_packages_path(packages_path)
    info["debug"] = True
    model = install_test_wrapper(skip_not_installed, get_model, info)
    eval_parameters = {
        p: v
        for p, v in fiducial_parameters.items()
        if p in model.parameterization.sampled_params()
    }
    model.add_requirements(reqs)
    model.logposterior(eval_parameters)
    return model
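
A minimal usage sketch for the helper above; the theory name, requirements and packages path are illustrative (in the real test suite, packages_path would come from a pytest fixture):

# Hypothetical invocation: build a CAMB-based model that must provide
# lensed CMB power spectra, evaluated at the fiducial point.
model = _get_model_with_requirements_and_eval(
    "camb",
    {"Cl": {"tt": 2500, "ee": 2500}},   # request CMB spectra up to these ells
    packages_path="/path/to/packages",  # hypothetical local packages path
    skip_not_installed=True)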
Example #2
def test_cosmo_docs_basic():
    flag = True
    for theo in ["camb", "classy"]:
        info_new = create_input(preset=preset_pre + theo)
        info_yaml_new = yaml_dump(info_new)
        file_path = os.path.join(path, file_pre + theo + ".yaml")
        with open(file_path) as docs_file:
            info_yaml_docs = docs_file.read()
        info_docs = yaml_load(info_yaml_docs)
        if not is_equal_info(
                info_new, info_docs, strict=True, print_not_log=True):
            with open(file_path, "w") as docs_file:
                docs_file.write(info_yaml_new)
            flag = False
            print("OLD:\n%s" % info_yaml_docs)
            print("----------------------------------------")
            print("NEW:\n%s" % info_yaml_new)
    assert flag, ("Differences in example input file. "
                  "Files have been re-generated; check out your git diff.")
Example #3
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             read_only=False,
             interactive=False,
             install_reqs_at=None,
             install_reqs_force=None):
    print("Generating grid...")
    batchPath = os.path.abspath(batchPath) + os.sep
    if not settings:
        if not settingName:
            raise NotImplementedError(
                "Re-using previous batch is work in progress...")
        #            if not pathIsGrid(batchPath):
        #                raise Exception('Need to give name of setting file if batchPath/config '
        #                                'does not exist')
        #            read_only = True
        #            sys.path.insert(0, batchPath + 'config')
        #            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        elif os.path.splitext(settingName)[-1].lower() in _yaml_extensions:
            settings = yaml_load_file(settingName)
        else:
            raise NotImplementedError(
                "Using a python script is work in progress...")
            # In this case, info-as-dict would be passed
            # settings = __import__(settingName, fromlist=['dummy'])
    batch = batchjob.BatchJob(batchPath)
    # batch.skip = settings.get("skip", False)
    batch.makeItems(settings, messages=not read_only)
    if read_only:
        for jobItem in list(batch.jobItems):
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %d existing chains' %
              len(batch.jobItems))
        return batch
    else:
        batch.makeDirectories(setting_file=None)
        batch.save()
    infos = {}
    components_used = {}
    # Default info
    defaults = copy.deepcopy(settings)
    grid_definition = defaults.pop("grid")
    models_definitions = grid_definition["models"]
    datasets_definitions = grid_definition["datasets"]
    for jobItem in batch.items(wantSubItems=False):
        # Model info
        jobItem.makeChainPath()
        try:
            model_info = copy.deepcopy(models_definitions[jobItem.param_set]
                                       or {})
        except KeyError:
            raise ValueError("Model '%s' must be defined." % jobItem.param_set)
        model_info = merge_info(defaults, model_info)
        # Dataset info
        try:
            dataset_info = copy.deepcopy(
                datasets_definitions[jobItem.data_set.tag])
        except KeyError:
            raise ValueError("Data set '%s' must be defined." %
                             jobItem.data_set.tag)
        # Combined info
        combined_info = merge_info(defaults, model_info, dataset_info)
        if "preset" in combined_info:
            preset = combined_info.pop("preset")
            combined_info = merge_info(create_input(**preset), combined_info)
        combined_info[_output_prefix] = jobItem.chainRoot
        # Requisites
        components_used = get_used_components(components_used, combined_info)
        if install_reqs_at:
            combined_info[_packages_path] = os.path.abspath(install_reqs_at)
        # Save the info (we will write it only after installation,
        # since we need to install first to add the auto covmats)
        if jobItem.param_set not in infos:
            infos[jobItem.param_set] = {}
        infos[jobItem.param_set][jobItem.data_set.tag] = combined_info
    # Installing requisites
    if install_reqs_at:
        print("Installing required code and data for the grid.")
        from cobaya.log import logger_setup
        logger_setup()
        install_reqs(components_used,
                     path=install_reqs_at,
                     force=install_reqs_force)
    print("Adding covmats (if necessary) and writing input files")
    for jobItem in batch.items(wantSubItems=False):
        info = infos[jobItem.param_set][jobItem.data_set.tag]
        # Covariance matrices
        # We try to find them now, instead of at run time, to check that they are correctly selected
        try:
            sampler = list(info[kinds.sampler])[0]
        except KeyError:
            raise ValueError("No sampler has been chosen")
        if sampler == "mcmc" and info[kinds.sampler][sampler].get(
                "covmat", "auto"):
            packages_path = install_reqs_at or info.get(_packages_path, None)
            if not packages_path:
                raise ValueError(
                    "Cannot assign automatic covariance matrices because no "
                    "external packages path has been defined.")
            # Need updated info for covmats: includes renames
            updated_info = update_info(info)
            # Ideally, we use slow+sampled parameters to look for the covariance matrix
            # but since for that we'd need to initialise a model, we approximate that set
            # as theory+sampled
            from itertools import chain
            like_params = set(
                chain(*[
                    list(like[_params])
                    for like in updated_info[kinds.likelihood].values()
                ]))
            params_info = {
                p: v
                for p, v in updated_info[_params].items()
                if is_sampled_param(v) and p not in like_params
            }
            best_covmat = _get_best_covmat(os.path.abspath(packages_path),
                                           params_info,
                                           updated_info[kinds.likelihood])
            info[kinds.sampler][sampler]["covmat"] = os.path.join(
                best_covmat["folder"], best_covmat["name"])
        # Write the info for this job
        # Allow overwrite since often will want to regenerate grid with tweaks
        yaml_dump_file(jobItem.iniFile(),
                       sort_cosmetic(info),
                       error_if_exists=False)

        # Non-translated old code
        # if not start_at_bestfit:
        #     setMinimize(jobItem, ini)
        #     variant = '_minimize'
        #     ini.saveFile(jobItem.iniFile(variant))
        ## NOT IMPLEMENTED: start at best fit
        ##        ini.params['start_at_bestfit'] = start_at_bestfit
        # ---
        # for deffile in settings.defaults:
        #    ini.defaults.append(batch.commonPath + deffile)
        # if hasattr(settings, 'override_defaults'):
        #    ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults
        # ---
        # # add ini files for importance sampling runs
        # for imp in jobItem.importanceJobs():
        #     if getattr(imp, 'importanceFilter', None): continue
        #     if batch.hasName(imp.name.replace('_post', '')):
        #         raise Exception('importance sampling something you already have?')
        #     for minimize in (False, True):
        #         if minimize and not getattr(imp, 'want_minimize', True): continue
        #         ini = IniFile()
        #         updateIniParams(ini, imp.importanceSettings, batch.commonPath)
        #         if cosmomcAction == 0 and not minimize:
        #             for deffile in settings.importanceDefaults:
        #                 ini.defaults.append(batch.commonPath + deffile)
        #             ini.params['redo_outroot'] = imp.chainRoot
        #             ini.params['action'] = 1
        #         else:
        #             ini.params['file_root'] = imp.chainRoot
        #         if minimize:
        #             setMinimize(jobItem, ini)
        #             variant = '_minimize'
        #         else:
        #             variant = ''
        #         ini.defaults.append(jobItem.iniFile())
        #         ini.saveFile(imp.iniFile(variant))
        #         if cosmomcAction != 0: break

    if not interactive:
        return batch
    print('Done... to run do: cobaya-grid-run %s' % batchPath)
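
For reference, the minimal shape of the settings that makeGrid consumes, reconstructed from the keys read above; all names and values are illustrative, and a YAML file passed as settingName would follow the same structure:

# Hypothetical minimal settings dict for makeGrid.
settings = {
    # Top-level keys (everything except "grid") act as defaults for every job
    "sampler": {"mcmc": {"covmat": "auto"}},
    "grid": {
        "models": {  # keyed by jobItem.param_set
            "base": {},
            "base_mnu": {"params": {"mnu": {"prior": {"min": 0, "max": 1}}}},
        },
        "datasets": {  # keyed by jobItem.data_set.tag
            "planck_TT": {"likelihood": {"planck_2018_highl_plik.TT": None}},
        },
    },
}
batch = makeGrid("/tmp/my_grid", settings=settings)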
Example #4
def body_of_test(modules,
                 best_fit,
                 info_likelihood,
                 info_theory,
                 ref_chi2,
                 best_fit_derived=None,
                 extra_model=None):
    # Create base info
    theo = list(info_theory)[0]
    # In CLASS, theta_s is exact, but it differs from the approximate
    # cosmomc_theta used by Planck, so we use H0 instead
    planck_base_model_prime = deepcopy(planck_base_model)
    planck_base_model_prime.update(extra_model or {})
    if "H0" in best_fit:
        planck_base_model_prime["hubble"] = "H"
        best_fit_derived = deepcopy(best_fit_derived) or {}
        best_fit_derived.pop("H0", None)
    info = create_input(planck_names=True,
                        theory=theo,
                        **planck_base_model_prime)
    # Add specifics for the test: theory, likelihoods and derived parameters
    info = recursive_update(info, {_theory: info_theory})
    info[_theory][theo]["use_planck_names"] = True
    info = recursive_update(info, {_likelihood: info_likelihood})
    info[_params].update({p: None for p in best_fit_derived or {}})
    # We need UPDATED info, to get the likelihoods nuisance parameters
    info = update_info(info)
    # Notice that update_info adds an aux internal-only _params property to the likes
    for lik in info[_likelihood]:
        info[_likelihood][lik].pop(_params, None)
    info[_path_install] = process_modules_path(modules)
    info[_debug] = True
    # Create the model and compute likelihood and derived parameters at best fit
    model = get_model(info)
    best_fit_values = {
        p: best_fit[p]
        for p in model.parameterization.sampled_params()
    }
    likes, derived = model.loglikes(best_fit_values)
    likes = dict(zip(list(model.likelihood), likes))
    derived = dict(zip(list(model.parameterization.derived_params()), derived))
    # Check value of likelihoods
    for like in info[_likelihood]:
        chi2 = -2 * likes[like]
        msg = (
            "Testing likelihood '%s': | %.2f (now) - %.2f (ref) | = %.2f >= %.2f"
            % (like, chi2, ref_chi2[like], abs(chi2 - ref_chi2[like]),
               ref_chi2["tolerance"]))
        assert abs(chi2 - ref_chi2[like]) < ref_chi2["tolerance"], msg
        print(msg)
    # Check value of derived parameters
    not_tested = []
    not_passed = []
    for p in best_fit_derived or {}:
        if best_fit_derived[p][0] is None or p not in derived:
            not_tested += [p]
            continue
        rel = (abs(derived[p] - best_fit_derived[p][0]) /
               best_fit_derived[p][1])
        if rel > tolerance_derived * (2 if p in
                                      ("YHe", "Y_p", "DH", "sigma8",
                                       "s8omegamp5", "thetastar") else 1):
            not_passed += [(p, rel)]
    print("Derived parameters not tested because not implemented: %r" %
          not_tested)
    assert not not_passed, ("Some derived parameters were off. "
                            "Fractions of test tolerance: %r" % not_passed)
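
A hypothetical invocation with placeholder numbers (a real test supplies the full best-fit point and measured reference chi-squared values):

body_of_test(
    modules="/path/to/modules",  # hypothetical external packages path
    best_fit={"H0": 67.3, "logA": 3.04, "ns": 0.965},  # plus remaining sampled params
    info_likelihood={"planck_2018_highl_plik.TT": None},
    info_theory={"camb": None},
    ref_chi2={"planck_2018_highl_plik.TT": 760.0, "tolerance": 0.1},  # placeholders
    best_fit_derived={"sigma8": [0.81, 0.006]})  # [best-fit value, error]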