Example 1
def prepare_data_script():
    warn_deprecation()
    logger_setup()
    if "CONTAINED" not in os.environ:
        log.error("This command should only be run within a container. "
                  "Run 'cobaya-install' instead.")
        raise HandledException
    parser = argparse.ArgumentParser(
        description="Cobaya's installation tool for the data needed "
                    "by a container.")
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        help="Force re-installation of apparently installed modules.")
    arguments = parser.parse_args()
    try:
        info = load_input(requirements_file_path)
    except IOError:
        log.error(
            "Cannot find the requirements file. This should not be happening.")
        raise HandledException
    install(info,
            path=_modules_path,
            force=arguments.force,
            **{
                _code: False,
                _data: True
            })
Example 2
def bib_script(args=None):
    """Command line script for the bibliography."""
    warn_deprecation()
    # Parse arguments and launch
    import argparse
    parser = argparse.ArgumentParser(
        prog="cobaya bib",
        description=("Prints bibliography to be cited for one or more "
                     "components or input files."))
    parser.add_argument(
        "files_or_components",
        action="store",
        nargs="+",
        metavar="input_file.yaml|component_name",
        help="Component(s) or input file(s) whose bib info is requested.")
    arguments = parser.parse_args(args)
    # Configure the logger ASAP
    logger_setup()
    logger = get_logger("bib")
    # Gather requests
    infos: List[Union[Dict, str]] = []
    for f in arguments.files_or_components:
        if os.path.splitext(f)[1].lower() in Extension.yamls:
            infos += [load_input(f)]
        else:  # a single component name, no kind specified
            infos += [f]
    if not infos:
        logger.info(
            "Nothing to do. Pass input files or component names as arguments.")
        return
    print(pretty_repr_bib(*get_bib_info(*infos, logger=logger)))
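Since bib_script accepts an optional argument list, it can be driven without a shell. A usage sketch, not from the source (file and component names are illustrative):
# Sketch: scripted calls with CLI-style arguments
bib_script(["input.yaml"])      # bib info for an input file
bib_script(["some_component"])  # or for a bare component name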
Example 3
def get_model(info):
    assert hasattr(info, "keys"), (
        "The first argument must be a dictionary with the info needed for the model. "
        "If you were trying to pass the name of an input file instead, "
        "load it first with 'cobaya.input.load_input', "
        "or, if you were passing a yaml string, load it with 'cobaya.yaml.yaml_load'."
    )
    # Configure the logger ASAP
    # Just a dummy import before configuring the logger, until I fix root/individual level
    import getdist
    logger_setup(info.pop(_debug, _debug_default), info.pop(_debug_file, None))
    # Create the full input information, including defaults for each module.
    info = deepcopy(info)
    ignored_info = {}
    for k in list(info):
        if k not in [
                _params, _likelihood, _prior, _theory, _path_install, _timing
        ]:
            ignored_info.update({k: info.pop(k)})
    import logging
    if ignored_info:
        logging.getLogger(__name__.split(".")[-1]).warning(
            "Ignored blocks/options: %r", list(ignored_info))
    full_info = get_full_info(info)
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        logging.getLogger(__name__.split(".")[-1]).debug(
            "Input info updated with defaults (dumped to YAML):\n%s",
            yaml_dump(full_info, force_reproducible=False))
    # Initialize the posterior and the sampler
    return Model(full_info[_params],
                 full_info[_likelihood],
                 full_info.get(_prior),
                 full_info.get(_theory),
                 modules=info.get(_path_install),
                 timing=full_info.get(_timing))
Example 4
def run(info):
    assert hasattr(info, "keys"), (
        "The first argument must be a dictionary with the info needed for the run. "
        "If you were trying to pass the name of an input file instead, "
        "load it first with 'cobaya.input.load_input', "
        "or, if you were passing a yaml string, load it with 'cobaya.yaml.yaml_load'."
    )
    # Configure the logger ASAP
    # Just a dummy import before configuring the logger, until I fix root/individual level
    import getdist
    logger_setup(info.get(_debug), info.get(_debug_file))
    import logging
    # Initialize output, if required
    output = Output(output_prefix=info.get(_output_prefix),
                    resume=info.get(_resume),
                    force_output=info.pop(_force, None))
    # Create the full input information, including defaults for each module.
    full_info = get_full_info(info)
    if output:
        full_info[_output_prefix] = output.updated_output_prefix()
        full_info[_resume] = output.is_resuming()
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Don't dump unless we are doing output, just in case something not serializable
        # May be fixed in the future if we find a way to serialize external functions
        if info.get(_output_prefix) and am_single_or_primary_process():
            logging.getLogger(__name__.split(".")[-1]).info(
                "Input info updated with defaults (dumped to YAML):\n%s",
                yaml_dump(full_info))
    # TO BE DEPRECATED IN >1.2!!! #####################
    _force_reproducible = "force_reproducible"
    if _force_reproducible in info:
        info.pop(_force_reproducible)
        logging.getLogger(__name__.split(".")[-1]).warning(
            "Option '%s' is no longer necessary. Please remove it!" %
            _force_reproducible)
    # CHECK THAT THIS WARNING WORKS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    ###################################################
    # We dump the info now, before modules initialization, lest it is accidentally modified
    # If resuming a sample, it checks that old and new infos are consistent
    output.dump_info(info, full_info)
    # Initialize the posterior and the sampler
    with Model(full_info[_params],
               full_info[_likelihood],
               full_info.get(_prior),
               full_info.get(_theory),
               modules=info.get(_path_install),
               timing=full_info.get(_timing),
               allow_renames=False) as model:
        with Sampler(full_info[_sampler],
                     model,
                     output,
                     resume=full_info.get(_resume),
                     modules=info.get(_path_install)) as sampler:
            sampler.run()
    # For scripted calls
    return deepcopy(full_info), sampler.products()
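A minimal scripted call, sketched under the same assumptions as the get_model example above (the 'evaluate' sampler and key names are assumptions):
# Sketch: run() returns the updated info and the sampler's products
info = {
    "params": {"x": {"prior": {"min": -5, "max": 5}}},
    "likelihood": {"gauss": lambda x: -0.5 * x ** 2},  # illustrative
    "sampler": {"evaluate": None},
}
updated_info, products = run(info)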
Example 5
def get_bib_info(*infos, logger=None):
    """
    Gathers and returns the descriptions and bibliographic sources for the components
    mentioned in ``infos``.

    ``infos`` can be input dictionaries or single component names.
    """
    if not logger:
        logger_setup()
        logger = get_logger("bib")
    used_components, component_infos = get_used_components(*infos,
                                                           return_infos=True)
    descs: InfoDict = {}
    bibs: InfoDict = {}
    for kind, components in used_components.items():
        if kind is None:
            continue  # we will deal with bare component names later, to avoid repetition
        descs[kind], bibs[kind] = {}, {}
        for component in components:
            try:
                descs[kind][component] = get_desc_component(
                    component, kind, component_infos[component])
                bibs[kind][component] = get_bib_component(component, kind)
            except ComponentNotFoundError:
                sugg = similar_internal_class_names(component)
                logger.error(
                    f"Could not identify component '{component}'. "
                    f"Did you mean any of the following? {sugg} (mind capitalization!)"
                )
                continue
    # Deal with bare component names
    for component in used_components.get(None, []):
        try:
            cls = get_component_class(component)
        except ComponentNotFoundError:
            sugg = similar_internal_class_names(component)
            logger.error(
                f"Could not identify component '{component}'. "
                f"Did you mean any of the following? {sugg} (mind capitalization!)"
            )
            continue
        kind = cls.get_kind()
        if kind not in descs:
            descs[kind], bibs[kind] = {}, {}
        if kind in descs and component in descs[kind]:
            continue  # avoid repetition
        descs[kind][component] = get_desc_component(cls, kind)
        bibs[kind][component] = get_bib_component(cls, kind)
    descs["cobaya"] = {"cobaya": cobaya_desc}
    bibs["cobaya"] = {"cobaya": cobaya_bib}
    return descs, bibs
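A usage sketch mirroring bib_script above (the component name is illustrative):
# Sketch: gather descriptions and bib entries, then pretty-print them
descs, bibs = get_bib_info("some_component")
print(pretty_repr_bib(descs, bibs))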
Example 6
def citation_script():
    from cobaya.mpi import get_mpi_rank
    if not get_mpi_rank():
        # Configure the logger ASAP
        from cobaya.log import logger_setup
        logger_setup()
        # Parse arguments and launch
        import argparse
        parser = argparse.ArgumentParser(description="Cobaya's citation tool.")
        parser.add_argument("files", action="store", nargs="+", metavar="input_file.yaml",
                            help="One or more input files.")
        from cobaya.input import load_input
        infos = [load_input(f) for f in parser.parse_args().files]
        citation(*infos)
Example 7
def citation_script():
    from cobaya.mpi import am_single_or_primary_process
    if am_single_or_primary_process():
        # Configure the logger ASAP
        from cobaya.log import logger_setup
        logger_setup()
        # Parse arguments and launch
        import argparse
        parser = argparse.ArgumentParser(description="Cobaya's citation tool.")
        parser.add_argument("files", action="store", nargs="+", metavar="input_file.yaml",
                            help="One or more input files.")
        from cobaya.input import load_input
        infos = [load_input(f) for f in parser.parse_args().files]
        print(prettyprint_citation(citation(*infos)))
Example 8
def gui_script():
    warn_deprecation()
    try:
        app = QApplication(sys.argv)
    except NameError:
        # TODO: fix this long logger setup
        from cobaya.log import logger_setup, LoggedError
        logger_setup(0, None)
        raise LoggedError(
            "cosmo_generator", "PySide2 is not installed! "
            "Check Cobaya's documentation for the cosmo_generator "
            "('Basic cosmology runs').")
    clip = app.clipboard()
    window = MainWindow()
    window.clipboard = clip
    sys.exit(app.exec_())
Example 9
def gui_script():
    try:
        app = QApplication(sys.argv)
    except NameError:
        # TODO: fix this long logger setup
        from cobaya.log import logger_setup, HandledException
        logger_setup(0, None)
        import logging
        logging.getLogger("cosmo_generator").error(
            "PySide or PySide2 is not installed! "
            "Check Cobaya's documentation for the cosmo_generator "
            "('Basic cosmology runs').")
        raise HandledException
    clip = app.clipboard()
    window = MainWindow()
    window.clipboard = clip
    sys.exit(app.exec_())
Example 10
def create_image_script():
    warn_deprecation()
    logger_setup()
    parser = argparse.ArgumentParser(
        prog="cobaya create-image",
        description=(
            "Cobaya's tool for preparing Docker (for Shifter) and Singularity images."))
    parser.add_argument("files", action="store", nargs="+", metavar="input_file.yaml",
                        help="One or more input files.")
    parser.add_argument("-v", "--mpi-version", action="store", default=None,
                        metavar="X.Y(.Z)", dest="version", help="Version of the MPI lib.")
    group_type = parser.add_mutually_exclusive_group(required=True)
    group_type.add_argument("-d", "--docker", action="store_const", const="docker",
                            help="Create a Docker image (for Shifter).", dest="type")
    group_type.add_argument("-s", "--singularity", action="store_const", dest="type",
                            const="singularity", help="Create a Singularity image.")
    arguments = parser.parse_args()
    if arguments.type == "docker":
        create_docker_image(arguments.files, MPI_version=arguments.version)
    elif arguments.type == "singularity":
        create_singularity_image(arguments.files, MPI_version=arguments.version)
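A sketch of an equivalent programmatic invocation; since parse_args() reads sys.argv[1:], the arguments can be injected there (values are illustrative):
# Sketch: drive the script by populating sys.argv before the call
import sys
sys.argv = ["cobaya", "input.yaml", "--docker", "--mpi-version", "3.2"]
create_image_script()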
Example 11
def run_script():
    warn_deprecation()
    import os
    import argparse
    parser = argparse.ArgumentParser(description="Cobaya's run script.")
    parser.add_argument("input_file",
                        nargs=1,
                        action="store",
                        metavar="input_file.yaml",
                        help="An input file to run.")
    parser.add_argument("-" + _packages_path_arg[0],
                        "--" + _packages_path_arg_posix,
                        action="store",
                        nargs=1,
                        metavar="/packages/path",
                        default=[None],
                        help="Path where external packages were installed.")
    # MARKED FOR DEPRECATION IN v3.0
    modules = "modules"
    parser.add_argument("-" + modules[0],
                        "--" + modules,
                        action="store",
                        nargs=1,
                        required=False,
                        metavar="/packages/path",
                        default=[None],
                        help="To be deprecated! "
                        "Alias for %s, which should be used instead." %
                        _packages_path_arg_posix)
    # END OF DEPRECATION BLOCK -- CONTINUES BELOW!
    parser.add_argument("-" + _output_prefix[0],
                        "--" + _output_prefix,
                        action="store",
                        nargs=1,
                        metavar="/some/path",
                        default=[None],
                        help="Path and prefix for the text output.")
    parser.add_argument("-" + _debug[0],
                        "--" + _debug,
                        action="store_true",
                        help="Produce verbose debug output.")
    continuation = parser.add_mutually_exclusive_group(required=False)
    continuation.add_argument(
        "-" + _resume[0],
        "--" + _resume,
        action="store_true",
        help="Resume an existing chain if it has similar info "
        "(fails otherwise).")
    continuation.add_argument("-" + _force[0],
                              "--" + _force,
                              action="store_true",
                              help="Overwrites previous output, if it exists "
                              "(use with care!)")
    parser.add_argument("--%s" % _test_run,
                        action="store_true",
                        help="Initialize model and sampler, and exit.")
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument("--no-mpi",
                        action='store_true',
                        help="disable MPI when mpi4py installed but MPI does "
                        "not actually work")
    arguments = parser.parse_args()
    if arguments.no_mpi or getattr(arguments, _test_run, False):
        set_mpi_disabled()
    if any((os.path.splitext(f)[0] in ("input", "updated"))
           for f in arguments.input_file):
        raise ValueError("'input' and 'updated' are reserved file names. "
                         "Please, use a different one.")
    load_input = import_MPI(".input", "load_input")
    given_input = arguments.input_file[0]
    if any(given_input.lower().endswith(ext) for ext in _yaml_extensions):
        info = load_input(given_input)
        output_prefix_cmd = getattr(arguments, _output_prefix)[0]
        output_prefix_input = info.get(_output_prefix)
        info[_output_prefix] = output_prefix_cmd or output_prefix_input
    else:
        # Passed an existing output_prefix? Try to find the corresponding *.updated.yaml
        updated_file = get_info_path(*split_prefix(given_input),
                                     kind="updated")
        try:
            info = load_input(updated_file)
        except IOError:
            raise ValueError(
                "Not a valid input file, or non-existent run to resume")
        # We need to update the output_prefix to resume the run *where it is*
        info[_output_prefix] = given_input
        # If input given this way, we obviously want to resume!
        info[_resume] = True
    # solve packages installation path cmd > env > input
    # MARKED FOR DEPRECATION IN v3.0
    if getattr(arguments, modules) != [None]:
        logger_setup()
        logger = logging.getLogger(__name__.split(".")[-1])
        logger.warning(
            "*DEPRECATION*: -m/--modules will be deprecated in favor of "
            "-%s/--%s in the next version. Please, use that one instead.",
            _packages_path_arg[0], _packages_path_arg_posix)
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        if getattr(arguments, _packages_path_arg) == [None]:
            setattr(arguments, _packages_path_arg, getattr(arguments, modules))
    # BEHAVIOUR TO BE REPLACED BY ERROR:
    check_deprecated_modules_path(info)
    # END OF DEPRECATION BLOCK
    info[_packages_path] = \
        getattr(arguments, _packages_path_arg)[0] or info.get(_packages_path)
    info[_debug] = getattr(arguments, _debug) or info.get(
        _debug, _debug_default)
    info[_test_run] = getattr(arguments, _test_run, False)
    # If any of resume|force given as cmd args, ignore those in the input file
    resume_arg, force_arg = [
        getattr(arguments, arg) for arg in [_resume, _force]
    ]
    if any([resume_arg, force_arg]):
        info[_resume], info[_force] = resume_arg, force_arg
    if _post in info:
        post(info)
    else:
        run(info)
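A sketch of an equivalent programmatic invocation, assuming the conventional values of the constants used to build the flags above (_output_prefix == "output", _force == "force"):
# Sketch: drive run_script by populating sys.argv before the call
import sys
sys.argv = ["cobaya-run", "input.yaml", "--output", "chains/test", "--force"]
run_script()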
Example 12
def install_script():
    from cobaya.mpi import get_mpi_rank
    if not get_mpi_rank():
        # Configure the logger ASAP
        logger_setup()
        log = logging.getLogger(__name__.split(".")[-1])
        # Parse arguments
        import argparse
        parser = argparse.ArgumentParser(
            description="Cobaya's installation tool for external modules.")
        parser.add_argument("files",
                            action="store",
                            nargs="+",
                            metavar="input_file.yaml",
                            help="One or more input files.")
        parser.add_argument(
            "-p",
            "--path",
            action="store",
            nargs=1,
            required=True,
            metavar="/install/path",
            help="Desired path where to install external modules.")
        parser.add_argument(
            "-f",
            "--force",
            action="store_true",
            default=False,
            help="Force re-installation of apparently installed modules.")
        parser.add_argument(
            "--no-progress-bars",
            action="store_true",
            default=False,
            help="No progress bars shown. Shorter logs (used in Travis).")
        group_just = parser.add_mutually_exclusive_group(required=False)
        group_just.add_argument("-c",
                                "--just-code",
                                action="store_false",
                                default=True,
                                help="Install code of the modules.",
                                dest=_data)
        group_just.add_argument("-d",
                                "--just-data",
                                action="store_false",
                                default=True,
                                help="Install data of the modules.",
                                dest=_code)
        arguments = parser.parse_args()
        from cobaya.input import load_input
        try:
            infos = [load_input(f) for f in arguments.files]
        except HandledException:
            log.error("Maybe you meant to pass an installation path? "
                      "In that case, use '--path=/path/to/modules'.")
            raise HandledException
        # Launch installer
        install(*infos,
                path=arguments.path[0],
                **{
                    arg: getattr(arguments, arg)
                    for arg in ["force", _code, _data, "no_progress_bars"]
                })
Example 13
def run(info):

    assert hasattr(info, "items"), (
        "The agument of `run` must be a dictionary with the info needed for the run. "
        "If you were trying to pass an input file instead, load it first with "
        "`cobaya.input.load_input`.")

    # Import names
    from cobaya.conventions import _likelihood, _prior, _params
    from cobaya.conventions import _theory, _sampler, _path_install
    from cobaya.conventions import _debug, _debug_file, _output_prefix

    # Configure the logger ASAP
    from cobaya.log import logger_setup
    logger_setup(info.get(_debug), info.get(_debug_file))

    # Debug (lazy call)
    import logging
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Don't dump unless we are doing output, just in case something not serializable
        # May be fixed in the future if we find a way to serialize external functions
        if info.get(_output_prefix):
            from cobaya.yaml import yaml_dump
            logging.getLogger(__name__.split(".")[-1]).debug(
                "Input info (dumped to YAML):\n%s", yaml_dump(info))

    # Import general classes
    from cobaya.prior import Prior
    from cobaya.sampler import get_Sampler as Sampler

    # Import the functions and classes that need MPI wrapping
    from cobaya.mpi import import_MPI
    #    Likelihood = import_MPI(".likelihood", "LikelihoodCollection")
    from cobaya.likelihood import LikelihoodCollection as Likelihood

    # Initialise output, if required
    do_output = info.get(_output_prefix)
    if do_output:
        Output = import_MPI(".output", "Output")
        output = Output(info)
    else:
        from cobaya.output import Output_dummy
        output = Output_dummy(info)

    # Create the full input information, including defaults for each module.
    from cobaya.input import get_full_info
    full_info = get_full_info(info)
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Don't dump unless we are doing output, just in case something not serializable
        # May be fixed in the future if we find a way to serialize external functions
        if info.get(_output_prefix):
            logging.getLogger(__name__.split(".")[-1]).debug(
                "Updated info (dumped to YAML):\n%s", yaml_dump(full_info))
    # We dump the info now, before modules initialization, lest it is accidentally modified
    output.dump_info(info, full_info)

    # Set the path of the installed modules, if given
    from cobaya.tools import set_path_to_installation
    set_path_to_installation(info.get(_path_install))

    # Initialise parametrization, likelihoods and prior
    from cobaya.parametrization import Parametrization
    with Parametrization(full_info[_params]) as par:
        with Prior(par, full_info.get(_prior)) as prior:
            with Likelihood(full_info[_likelihood], par,
                            full_info.get(_theory)) as lik:
                with Sampler(full_info[_sampler], par, prior, lik,
                             output) as sampler:
                    sampler.run()

    # For scripted calls
    return deepcopy(full_info), sampler.products()
Example 14
def install_script():
    set_mpi_disabled(True)
    warn_deprecation()
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser(
        description="Cobaya's installation tool for external packages.")
    parser.add_argument(
        "files_or_components",
        action="store",
        nargs="+",
        metavar="input_file.yaml|component_name",
        help="One or more input files or component names "
        "(or simply 'cosmo' to install all the requisites for basic"
        " cosmological runs)")
    parser.add_argument(
        "-" + _packages_path_arg[0],
        "--" + _packages_path_arg_posix,
        action="store",
        nargs=1,
        required=False,
        metavar="/packages/path",
        default=[None],
        help="Desired path where to install external packages. "
        "Optional if one has been set globally or as an env variable"
        " (run with '--show_%s' to check)." % _packages_path_arg_posix)
    # MARKED FOR DEPRECATION IN v3.0
    modules = "modules"
    parser.add_argument("-" + modules[0],
                        "--" + modules,
                        action="store",
                        nargs=1,
                        required=False,
                        metavar="/packages/path",
                        default=[None],
                        help="To be deprecated! "
                        "Alias for %s, which should be used instead." %
                        _packages_path_arg_posix)
    # END OF DEPRECATION BLOCK -- CONTINUES BELOW!
    output_show_packages_path = resolve_packages_path()
    if output_show_packages_path and os.environ.get(_packages_path_env):
        output_show_packages_path += " (from env variable %r)" % _packages_path_env
    elif output_show_packages_path:
        output_show_packages_path += " (from config file)"
    else:
        output_show_packages_path = "(Not currently set.)"
    parser.add_argument(
        "--show-" + _packages_path_arg_posix,
        action="version",
        version=output_show_packages_path,
        help="Prints default external packages installation folder "
        "and exits.")
    parser.add_argument(
        "-" + _force[0],
        "--" + _force,
        action="store_true",
        default=False,
        help="Force re-installation of apparently installed packages.")
    parser.add_argument(
        "--skip",
        action="store",
        nargs="*",
        metavar="keyword",
        help="Keywords of components that will be skipped during "
        "installation.")
    parser.add_argument(
        "--no-progress-bars",
        action="store_true",
        default=False,
        help="No progress bars shown. Shorter logs (used in Travis).")
    parser.add_argument("--just-check",
                        action="store_true",
                        default=False,
                        help="Just check whether components are installed.")
    parser.add_argument(
        "--no-set-global",
        action="store_true",
        default=False,
        help="Do not store the installation path for later runs.")
    group_just = parser.add_mutually_exclusive_group(required=False)
    group_just.add_argument("-C",
                            "--just-code",
                            action="store_false",
                            default=True,
                            help="Install code of the components.",
                            dest=_data)
    group_just.add_argument("-D",
                            "--just-data",
                            action="store_false",
                            default=True,
                            help="Install data of the components.",
                            dest=_code)
    arguments = parser.parse_args()
    # Configure the logger ASAP
    logger_setup()
    logger = logging.getLogger(__name__.split(".")[-1])
    # Gather requests
    infos = []
    for f in arguments.files_or_components:
        if f.lower() == "cosmo":
            logger.info("Installing basic cosmological packages.")
            from cobaya.cosmo_input import install_basic
            infos += [install_basic]
        elif f.lower() == "cosmo-tests":
            logger.info("Installing *tested* cosmological packages.")
            from cobaya.cosmo_input import install_tests
            infos += [install_tests]
        elif os.path.splitext(f)[1].lower() in _yaml_extensions:
            from cobaya.input import load_input
            infos += [load_input(f)]
        else:
            try:
                kind = get_kind(f)
                infos += [{kind: {f: None}}]
            except Exception:
                logger.warning("Could not identify component %r. Skipping.", f)
    if not infos:
        logger.info("Nothing to install.")
        return
    # MARKED FOR DEPRECATION IN v3.0
    if getattr(arguments, modules) != [None]:
        logger.warning(
            "*DEPRECATION*: -m/--modules will be deprecated in favor of "
            "-%s/--%s in the next version. Please, use that one instead.",
            _packages_path_arg[0], _packages_path_arg_posix)
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        if getattr(arguments, _packages_path_arg) == [None]:
            setattr(arguments, _packages_path_arg, getattr(arguments, modules))
    # END OF DEPRECATION BLOCK
    # Launch installer
    install(*infos,
            path=getattr(arguments, _packages_path_arg)[0],
            **{
                arg: getattr(arguments, arg)
                for arg in [
                    "force", _code, _data, "no_progress_bars", "just_check",
                    "no_set_global", "skip"
                ]
            })
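A sketch of an equivalent programmatic invocation, assuming _packages_path_arg_posix == "packages-path" (the path is illustrative):
# Sketch: drive install_script by populating sys.argv before the call
import sys
sys.argv = ["cobaya-install", "cosmo", "--packages-path", "/path/to/packages"]
install_script()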
Example 15
def install(*infos, **kwargs):
    if not log.root.handlers:
        logger_setup()
    path = kwargs.get("path")
    if not path:
        path = resolve_packages_path(infos)
    if not path:
        raise LoggedError(
            log,
            "No 'path' argument given, and none could be found in input infos "
            "(as %r), the %r env variable or the config file. "
            "Maybe specify one via a command line argument '-%s [...]'?",
            _packages_path, _packages_path_env, _packages_path_arg[0])
    abspath = os.path.abspath(path)
    log.info("Installing external packages at '%s'", abspath)
    kwargs_install = {
        "force": kwargs.get("force", False),
        "no_progress_bars": kwargs.get("no_progress_bars")
    }
    for what in (_code, _data):
        kwargs_install[what] = kwargs.get(what, True)
        spath = os.path.join(abspath, what)
        if kwargs_install[what] and not os.path.exists(spath):
            try:
                os.makedirs(spath)
            except OSError:
                raise LoggedError(
                    log,
                    "Could not create the desired installation folder '%s'",
                    spath)
    failed_components = []
    skip_keywords = set(kwargs.get("skip", []) or [])
    skip_keywords_env = set(
        os.environ.get(_install_skip_env, "").replace(",",
                                                      " ").lower().split())
    skip_keywords = skip_keywords.union(skip_keywords_env)
    for kind, components in get_used_components(*infos).items():
        for component in components:
            print()
            print(create_banner(kind + ":" + component, symbol="=", length=80))
            print()
            if _skip_helper(component.lower(), skip_keywords,
                            skip_keywords_env, log):
                continue
            info = (next(
                info for info in infos
                if component in info.get(kind, {}))[kind][component]) or {}
            if isinstance(info, str) or _external in info:
                log.warning(
                    "Component '%s' is a custom function. "
                    "Nothing to do.", component)
                continue
            try:
                imported_class = get_class(component,
                                           kind,
                                           component_path=info.pop(
                                               _component_path, None))
            except ImportError as e:
                log.error("Component '%s' not recognized. [%s]." %
                          (component, e))
                failed_components += ["%s:%s" % (kind, component)]
                continue
            else:
                if _skip_helper(imported_class.__name__.lower(), skip_keywords,
                                skip_keywords_env, log):
                    continue
            is_installed = getattr(imported_class, "is_installed", None)
            if is_installed is None:
                log.info("%s.%s is a fully built-in component: nothing to do.",
                         kind, imported_class.__name__)
                continue
            if is_installed(path=abspath, **kwargs_install):
                log.info("External component already installed.")
                if kwargs.get("just_check", False):
                    continue
                if kwargs_install["force"]:
                    log.info("Forcing re-installation, as requested.")
                else:
                    log.info("Doing nothing.")
                    continue
            else:
                if kwargs.get("just_check", False):
                    log.info("NOT INSTALLED!")
                    continue
            try:
                install_this = getattr(imported_class, "install", None)
                success = install_this(path=abspath, **kwargs_install)
            except KeyboardInterrupt:
                raise
            except:
                traceback.print_exception(*sys.exc_info(), file=sys.stdout)
                log.error(
                    "An unknown error occurred. Delete the external packages "
                    "folder %r and try again. "
                    "Please, notify the developers if this error persists.",
                    abspath)
                success = False
            if success:
                log.info("Successfully installed!")
            else:
                log.error(
                    "Installation failed! Look at the error messages above. "
                    "Solve them and try again, or, if you are unable to solve, "
                    "install the packages required by this component manually."
                )
                failed_components += ["%s:%s" % (kind, component)]
                continue
            # test installation
            if not is_installed(path=abspath, **kwargs_install):
                log.error(
                    "Installation apparently worked, "
                    "but the subsequent installation test failed! "
                    "Look at the error messages above. "
                    "Solve them and try again, or, if you are unable to solve, "
                    "install the packages required by this component manually."
                )
                failed_components += ["%s:%s" % (kind, component)]
    if failed_components:
        bullet = "\n - "
        raise LoggedError(
            log,
            "The installation (or installation test) of some component(s) has "
            "failed: %s\nCheck output of the installer of each component above "
            "for precise error info.\n",
            bullet + bullet.join(failed_components))
    # Set the installation path in the global config file
    if not kwargs.get("no_set_global", False) and not kwargs.get(
            "just_check", False):
        write_packages_path_in_config_file(abspath)
        log.info(
            "The installation path has been written in the global config file."
        )
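A scripted usage sketch for install(); the path is illustrative, and the keyword arguments are the ones read via kwargs.get above:
# Sketch: install the requisites of a single input file
from cobaya.input import load_input
info = load_input("input.yaml")
install(info, path="/path/to/packages", force=False, no_progress_bars=True)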
Example 16
def post(info, sample=None):
    logger_setup(info.get(_debug), info.get(_debug_file))
    log = logging.getLogger(__name__.split(".")[-1])
    try:
        info_post = info[_post]
    except KeyError:
        log.error("No 'post' block given. Nothing to do!")
        raise HandledException
    if get_mpi_rank():
        log.warning(
            "Post-processing is not yet MPI-able. Doing nothing for rank > 1 processes."
        )
        return
    # 1. Load existing sample
    output_in = Output(output_prefix=info.get(_output_prefix), resume=True)
    info_in = load_input(output_in.file_full) if output_in else deepcopy(info)
    dummy_model_in = DummyModel(info_in[_params], info_in[_likelihood],
                                info_in.get(_prior, None),
                                info_in.get(_theory, None))
    if output_in:
        i = 0
        while True:
            try:
                collection = Collection(dummy_model_in,
                                        output_in,
                                        name="%d" % (1 + i),
                                        load=True,
                                        onload_skip=info_post.get("skip", 0),
                                        onload_thin=info_post.get("thin", 1))
                if i == 0:
                    collection_in = collection
                else:
                    collection_in._append(collection)
                i += 1
            except IOError:
                break
    elif sample:
        if isinstance(sample, Collection):
            sample = [sample]
        collection_in = deepcopy(sample[0])
        for s in sample[1:]:
            try:
                collection_in._append(s)
            except:
                log.error("Failed to load some of the input samples.")
                raise HandledException
        i = len(sample)
    else:
        log.error(
            "Not output from where to load from or input collections given.")
        raise HandledException
    log.info("Loaded %d chain%s. Will process %d samples.", i,
             "s" if i - 1 else "", collection_in.n())
    if collection_in.n() <= 1:
        log.error(
            "Not enough samples for post-processing. Try using a larger sample, "
            "or skipping or thinning less.")
        raise HandledException
    # 2. Compare old and new info: determine what to do
    add = info_post.get("add", {})
    remove = info_post.get("remove", {})
    # Add a dummy 'one' likelihood, to absorb unused parameters
    if not add.get(_likelihood):
        add[_likelihood] = odict()
    add[_likelihood].update({"one": None})
    # Expand the "add" info
    add = get_full_info(add)
    # 2.1 Adding/removing derived parameters and changes in priors of sampled parameters
    out = {_params: deepcopy(info_in[_params])}
    for p in remove.get(_params, {}):
        pinfo = info_in[_params].get(p)
        if pinfo is None or not is_derived_param(pinfo):
            log.error(
                "You tried to remove parameter '%s', which is not a derived paramter. "
                "Only derived parameters can be removed during post-processing.",
                p)
            raise HandledException
        out[_params].pop(p)
    mlprior_names_add = []
    for p, pinfo in add.get(_params, {}).items():
        pinfo_in = info_in[_params].get(p)
        if is_sampled_param(pinfo):
            if not is_sampled_param(pinfo_in):
                # No added sampled parameters (de-marginalisation not implemented)
                if pinfo_in is None:
                    log.error(
                        "You added a new sampled parameter %r (maybe accidentaly "
                        "by adding a new likelihood that depends on it). "
                        "Adding new sampled parameters is not possible. Try fixing "
                        "it to some value.", p)
                    raise HandledException
                else:
                    log.error(
                        "You tried to change the prior of parameter '%s', "
                        "but it was not a sampled parameter. "
                        "To change that prior, you need to define as an external one.",
                        p)
                    raise HandledException
            if (mlprior_names_add[:1] !=
                    [_minuslogprior + _separator + _prior_1d_name]):
                mlprior_names_add = (
                    [_minuslogprior + _separator + _prior_1d_name] +
                    mlprior_names_add)
        elif is_derived_param(pinfo):
            if p in out[_params]:
                log.error(
                    "You tried to add derived parameter '%s', which is already "
                    "present. To force its recomputation, 'remove' it too.", p)
                raise HandledException
        elif is_fixed_param(pinfo):
            # Only one possibility left "fixed" parameter that was not present before:
            # input of new likelihood, or just an argument for dynamical derived (dropped)
            if ((p in info_in[_params] and pinfo[_p_value] !=
                 (pinfo_in or {}).get(_p_value, None))):
                log.error(
                    "You tried to add a fixed parameter '%s: %r' that was already present"
                    " but had a different value or was not fixed. This is not allowed. "
                    "The old info of the parameter was '%s: %r'", p,
                    dict(pinfo), p, dict(pinfo_in))
                raise HandledException
        else:
            log.error("This should not happen. Contact the developers.")
            raise HandledException
        out[_params][p] = pinfo
    # For the likelihood only, turn the rest of *derived* parameters into constants,
    # so that the likelihoods do not try to compute them)
    # But be careful to exclude *input* params that have a "derived: True" value
    # (which in "full info" turns into "derived: 'lambda [x]: [x]'")
    out_params_like = deepcopy(out[_params])
    for p, pinfo in out_params_like.items():
        if ((is_derived_param(pinfo) and not (_p_value in pinfo)
             and p not in add.get(_params, {}))):
            out_params_like[p] = {_p_value: np.nan, _p_drop: True}
    parameterization_like = Parameterization(out_params_like,
                                             ignore_unused_sampled=True)
    # 2.2 Manage adding/removing priors and likelihoods
    warn_remove = False
    for level in [_prior, _likelihood]:
        out[level] = getattr(dummy_model_in, level)
        if level == _prior:
            out[level].remove(_prior_1d_name)
        for pdf in info_post.get("remove", {}).get(level, []) or []:
            try:
                out[level].remove(pdf)
                warn_remove = True
            except ValueError:
                log.error(
                    "Trying to remove %s '%s', but it is not present. "
                    "Existing ones: %r", level, pdf, out[level])
                raise HandledException
    if warn_remove:
        log.warning("You are removing a prior or likelihood pdf. "
                    "Notice that if the resulting posterior is much wider "
                    "than the original one, or displaced enough, "
                    "it is probably safer to explore it directly.")
    if _prior in add:
        mlprior_names_add += [
            _minuslogprior + _separator + name for name in add[_prior]
        ]
        out[_prior] += list(add[_prior])
    prior_recompute_1d = (mlprior_names_add[:1] == [
        _minuslogprior + _separator + _prior_1d_name
    ])
    # Don't initialise the theory code if not adding/recomputing theory,
    # theory-derived params or likelihoods
    recompute_theory = info_in.get(_theory) and not (list(
        add[_likelihood]) == ["one"] and not any([
            is_derived_param(pinfo) for pinfo in add.get(_params, {}).values()
        ]))
    if recompute_theory:
        # Inherit from the original chain (needs input|output_params, renames, etc
        theory = list(info_in[_theory].keys())[0]
        info_theory_out = odict([[
            theory,
            recursive_update(deepcopy(info_in[_theory][theory]),
                             add.get(_theory, {theory: {}})[theory])
        ]])
    else:
        info_theory_out = None
    chi2_names_add = [
        _chi2 + _separator + name for name in add[_likelihood]
        if name != "one"
    ]
    out[_likelihood] += [l for l in add[_likelihood] if l != "one"]
    if recompute_theory:
        log.warning(
            "You are recomputing the theory, but in the current version this does "
            "not force recomputation of any likelihood or derived parameter, "
            "unless explicitly removed+added.")
    for level in [_prior, _likelihood]:
        for i, x_i in enumerate(out[level]):
            if x_i in list(out[level])[i + 1:]:
                log.error(
                    "You have added %s '%s', which was already present. If you "
                    "want to force its recomputation, you must also 'remove' it.",
                    level, x_i)
                raise HandledException
    # 3. Create output collection
    if "suffix" not in info_post:
        log.error("You need to provide a 'suffix' for your chains.")
        raise HandledException
    # Use default prefix if it exists. If it does not, produce no output by default.
    # {post: {output: None}} suppresses output, and if it's a string, updates it.
    out_prefix = info_post.get(_output_prefix, info.get(_output_prefix))
    if out_prefix not in [None, False]:
        out_prefix += "_" + _post + "_" + info_post["suffix"]
    output_out = Output(output_prefix=out_prefix,
                        force_output=info.get(_force))
    info_out = deepcopy(info)
    info_out[_post] = info_post
    # Updated with input info and extended (full) add info
    info_out.update(info_in)
    info_out[_post]["add"] = add
    dummy_model_out = DummyModel(out[_params],
                                 out[_likelihood],
                                 info_prior=out[_prior])
    if recompute_theory:
        theory = list(info_theory_out.keys())[0]
        if _input_params not in info_theory_out[theory]:
            log.error(
                "You appear to be post-processing a chain generated with an older "
                "version of Cobaya. For post-processing to work, please edit the "
                "'[root]__full.info' file of the original chain to add, inside the "
                "theory code block, the list of its input parameters. E.g.\n----\n"
                "theory:\n  %s:\n    input_params: [param1, param2, ...]\n"
                "----\nIf you get strange errors later, it is likely that you did not "
                "specify the correct set of theory parameters.\n"
                "The full set of input parameters are %s.", theory,
                list(dummy_model_out.parameterization.input_params()))
            raise HandledException
    prior_add = Prior(dummy_model_out.parameterization, add.get(_prior))
    likelihood_add = Likelihood(add[_likelihood],
                                parameterization_like,
                                info_theory=info_theory_out,
                                modules=info.get(_path_install))
    # Remove auxiliary "one" before dumping -- 'add' *is* info_out[_post]["add"]
    add[_likelihood].pop("one")
    if likelihood_add.theory:
        # Make sure that theory.needs is called at least once, for adjustments
        likelihood_add.theory.needs()
    collection_out = Collection(dummy_model_out, output_out, name="1")
    output_out.dump_info({}, info_out)
    # 4. Main loop!
    log.info("Running post-processing...")
    last_percent = 0
    for i, point in enumerate(collection_in.data.itertuples()):
        log.debug("Point: %r", point)
        sampled = [
            getattr(point, param)
            for param in dummy_model_in.parameterization.sampled_params()
        ]
        derived = odict(
            [[param, getattr(point, param, None)]
             for param in dummy_model_out.parameterization.derived_params()])
        inputs = odict([[
            param,
            getattr(
                point, param,
                dummy_model_in.parameterization.constant_params().get(
                    param,
                    dummy_model_out.parameterization.constant_params().get(
                        param, None)))
        ] for param in dummy_model_out.parameterization.input_params()])
        # Solve inputs that depend on a function and were not saved
        # (we don't use the Parameterization_to_input method in case there are references
        #  to functions that cannot be loaded at the moment)
        for p, value in inputs.items():
            if value is None:
                func = dummy_model_out.parameterization._input_funcs[p]
                args = dummy_model_out.parameterization._input_args[p]
                inputs[p] = func(*[getattr(point, arg) for arg in args])
        # Add/remove priors
        priors_add = prior_add.logps(sampled)
        if not prior_recompute_1d:
            priors_add = priors_add[1:]
        logpriors_add = odict(zip(mlprior_names_add, priors_add))
        logpriors_new = [
            logpriors_add.get(name, -getattr(point, name, 0))
            for name in collection_out.minuslogprior_names
        ]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New set of priors: %r",
                      dict(zip(dummy_model_out.prior, logpriors_new)))
        if -np.inf in logpriors_new:
            continue
        # Add/remove likelihoods
        output_like = []
        if likelihood_add:
            # Notice "one" (last in likelihood_add) is ignored: not in chi2_names
            loglikes_add = odict(
                zip(chi2_names_add,
                    likelihood_add.logps(inputs, _derived=output_like)))
            output_like = dict(zip(likelihood_add.output_params, output_like))
        else:
            loglikes_add = dict()
        loglikes_new = [
            loglikes_add.get(name, -0.5 * getattr(point, name, 0))
            for name in collection_out.chi2_names
        ]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New set of likelihoods: %r",
                      dict(zip(dummy_model_out.likelihood, loglikes_new)))
            if output_like:
                log.debug("New set of likelihood-derived parameters: %r",
                          output_like)
        if -np.inf in loglikes_new:
            continue
        # Add/remove derived parameters and change priors of sampled parameters
        for p in add[_params]:
            if p in dummy_model_out.parameterization._directly_output:
                derived[p] = output_like[p]
            elif p in dummy_model_out.parameterization._derived_funcs:
                func = dummy_model_out.parameterization._derived_funcs[p]
                args = dummy_model_out.parameterization._derived_args[p]
                derived[p] = func(*[
                    getattr(point, arg, output_like.get(arg, None))
                    for arg in args
                ])
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug(
                "New derived parameters: %r",
                dict([[
                    p, derived[p]
                ] for p in dummy_model_out.parameterization.derived_params()
                      if p in add[_params]]))
        # Save to the collection (keep old weight for now)
        collection_out.add(sampled,
                           derived=derived.values(),
                           weight=getattr(point, _weight),
                           logpriors=logpriors_new,
                           loglikes=loglikes_new)
        # Display progress
        percent = np.round(i / collection_in.n() * 100)
        if percent != last_percent and not percent % 5:
            last_percent = percent
            progress_bar(log, percent, " (%d/%d)" % (i, collection_in.n()))
    if not collection_out.data.last_valid_index():
        log.error(
            "No elements in the final sample. Possible causes: "
            "added a prior or likelihood valued zero over the full sampled domain, "
            "or the computation of the theory failed everywhere, etc.")
        raise HandledException
    # Reweight -- account for large dynamic range!
    #   Prefer to rescale +inf to finite, and ignore final points with -inf.
    #   Remove -inf's (0-weight), and correct indices
    difflogmax = max(collection_in[_minuslogpost] -
                     collection_out[_minuslogpost])
    collection_out.data[_weight] *= np.exp(collection_in[_minuslogpost] -
                                           collection_out[_minuslogpost] -
                                           difflogmax)
    collection_out.data = (
        collection_out.data[collection_out.data.weight > 0].reset_index(
            drop=True))
    collection_out._n = collection_out.data.last_valid_index() + 1
    # Write!
    collection_out._out_update()
    log.info("Finished! Final number of samples: %d", collection_out.n())
    return info_out, {"sample": collection_out}
Example 17
def install_script(args=None):
    set_mpi_disabled()
    warn_deprecation()
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser(
        prog="cobaya install",
        description="Cobaya's installation tool for external packages.")
    parser.add_argument(
        "files_or_components",
        action="store",
        nargs="+",
        metavar="input_file.yaml|component_name",
        help="One or more input files or component names "
        "(or simply 'cosmo' to install all the requisites for basic"
        " cosmological runs)")
    parser.add_argument(
        "-" + packages_path_arg[0],
        "--" + packages_path_arg_posix,
        action="store",
        required=False,
        metavar="/packages/path",
        default=None,
        help="Desired path where to install external packages. "
        "Optional if one has been set globally or as an env variable"
        " (run with '--show_%s' to check)." % packages_path_arg_posix)
    # MARKED FOR DEPRECATION IN v3.0
    modules = "modules"
    parser.add_argument("-" + modules[0],
                        "--" + modules,
                        action="store",
                        required=False,
                        metavar="/packages/path",
                        default=None,
                        help="Deprecated! Use %s instead." %
                        packages_path_arg_posix)
    # END OF DEPRECATION BLOCK -- CONTINUES BELOW!
    output_show_packages_path = resolve_packages_path()
    if output_show_packages_path and os.environ.get(packages_path_env):
        output_show_packages_path += " (from env variable %r)" % packages_path_env
    elif output_show_packages_path:
        output_show_packages_path += " (from config file)"
    else:
        output_show_packages_path = "(Not currently set.)"
    parser.add_argument(
        "--show-" + packages_path_arg_posix,
        action="version",
        version=output_show_packages_path,
        help="Prints default external packages installation folder "
        "and exits.")
    parser.add_argument(
        "-" + "f",
        "--" + "force",
        action="store_true",
        default=False,
        help="Force re-installation of apparently installed packages.")
    parser.add_argument(
        "--skip",
        action="store",
        nargs="*",
        metavar="keyword",
        help="Keywords of components that will be skipped during "
        "installation.")
    parser.add_argument(
        "--no-progress-bars",
        action="store_true",
        default=False,
        help="No progress bars shown. Shorter logs (used in Travis).")
    parser.add_argument("--%s" % "test",
                        action="store_true",
                        default=False,
                        help="Just check whether components are installed.")
    # MARKED FOR DEPRECATION IN v3.0
    parser.add_argument("--just-check",
                        action="store_true",
                        default=False,
                        help="Just check whether components are installed.")
    # END OF DEPRECATION BLOCK -- CONTINUES BELOW!
    parser.add_argument(
        "--no-set-global",
        action="store_true",
        default=False,
        help="Do not store the installation path for later runs.")
    parser.add_argument(
        "--skip-global",
        action="store_true",
        default=False,
        help="Skip installation of already-available Python modules.")
    parser.add_argument("-" + "d",
                        "--" + "debug",
                        action="store_true",
                        help="Produce verbose debug output.")
    group_just = parser.add_mutually_exclusive_group(required=False)
    group_just.add_argument("-C",
                            "--just-code",
                            action="store_false",
                            default=True,
                            help="Install code of the components.",
                            dest=data_path)
    group_just.add_argument("-D",
                            "--just-data",
                            action="store_false",
                            default=True,
                            help="Install data of the components.",
                            dest=code_path)
    arguments = parser.parse_args(args)
    # Configure the logger ASAP
    logger_setup()
    logger = get_logger("install")
    # Gather requests
    infos: List[InputDict] = []
    for f in arguments.files_or_components:
        if f.lower() == "cosmo":
            logger.info("Installing basic cosmological packages.")
            from cobaya.cosmo_input import install_basic
            infos += [install_basic]
        elif f.lower() == "cosmo-tests":
            logger.info("Installing *tested* cosmological packages.")
            from cobaya.cosmo_input import install_tests
            infos += [install_tests]
        elif os.path.splitext(f)[1].lower() in Extension.yamls:
            from cobaya.input import load_input
            infos += [load_input(f)]
        else:
            try:
                kind = get_kind(f)
                infos += [{kind: {f: None}}]
            except Exception:
                logger.warning("Could not identify component %r. Skipping.", f)
    if not infos:
        logger.info("Nothing to install.")
        return
    # List of deprecation warnings, to be printed *after* installation
    deprecation_warnings = []
    # MARKED FOR DEPRECATION IN v3.0
    if getattr(arguments, modules) is not None:
        raise LoggedError(
            logger, "-m/--modules has been deprecated in favor of "
            "-%s/--%s", packages_path_arg[0], packages_path_arg_posix)
    # END OF DEPRECATION BLOCK
    # MARKED FOR DEPRECATION IN v3.0
    if arguments.just_check is True:
        raise LoggedError(logger,
                          "--just-check has been deprecated in favor of --%s",
                          "test")
    # END OF DEPRECATION BLOCK
    # Launch installer
    install(*infos,
            path=getattr(arguments, packages_path_arg),
            **{
                arg: getattr(arguments, arg)
                for arg in [
                    "force", code_path, data_path, "no_progress_bars", "test",
                    "no_set_global", "skip", "skip_global", "debug"
                ]
            })
    # MARKED FOR DEPRECATION IN v3.0
    for warning_msg in deprecation_warnings:
        logger.warning(warning_msg)
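
For reference, the script above ends in a single call to cobaya's `install` function (shown in a later example). The following is a minimal sketch of the equivalent direct call; the component name, the packages path and the import location are assumptions:

from cobaya.install import install  # assumed import location

# Hypothetical equivalent of running the script as
# 'some_component -p /path/to/packages --test' (placeholder names):
install({"likelihood": {"some_component": None}},
        path="/path/to/packages", test=True)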
Example 18
def post(info, sample=None):
    logger_setup(info.get(_debug), info.get(_debug_file))
    log = logging.getLogger(__name__.split(".")[-1])
    # MARKED FOR DEPRECATION IN v3.0
    # BEHAVIOUR TO BE REPLACED BY ERROR:
    check_deprecated_modules_path(info)
    # END OF DEPRECATION BLOCK
    try:
        info_post = info[_post]
    except KeyError:
        raise LoggedError(log, "No 'post' block given. Nothing to do!")
    if get_mpi_rank():
        log.warning(
            "Post-processing is not yet MPI-aware. Doing nothing for rank > 1 processes.")
        return
    if info.get(_resume):
        log.warning("Resuming not implemented for post-processing. Re-starting.")
    # 1. Load existing sample
    output_in = get_output(output_prefix=info.get(_output_prefix))
    if output_in:
        try:
            info_in = output_in.reload_updated_info()
        except FileNotFoundError:
            raise LoggedError(log, "Error loading input model: "
                                   "could not find input info at %s",
                              output_in.file_updated)
    else:
        info_in = deepcopy_where_possible(info)
    dummy_model_in = DummyModel(info_in[_params], info_in[kinds.likelihood],
                                info_in.get(_prior, None))
    if output_in:
        if not output_in.find_collections():
            raise LoggedError(log, "No samples found for the input model with prefix %s",
                              os.path.join(output_in.folder, output_in.prefix))
        collection_in = output_in.load_collections(
            dummy_model_in, skip=info_post.get("skip", 0), thin=info_post.get("thin", 1),
            concatenate=True)
    elif sample:
        if isinstance(sample, Collection):
            sample = [sample]
        collection_in = deepcopy(sample[0])
        for s in sample[1:]:
            try:
                collection_in.append(s)
            except Exception:
                raise LoggedError(log, "Failed to load some of the input samples.")
    else:
        raise LoggedError(log,
                          "No output to load from and no input collections given.")
    log.info("Will process %d samples.", len(collection_in))
    if len(collection_in) <= 1:
        raise LoggedError(
            log, "Not enough samples for post-processing. Try using a larger sample, "
                 "or skipping or thinning less.")
    # 2. Compare old and new info: determine what to do
    add = info_post.get(_post_add, {}) or {}
    remove = info_post.get(_post_remove, {})
    # Add a dummy 'one' likelihood, to absorb unused parameters
    if not add.get(kinds.likelihood):
        add[kinds.likelihood] = {}
    add[kinds.likelihood]["one"] = None
    # Expand the "add" info
    add = update_info(add)
    # 2.1 Adding/removing derived parameters and changes in priors of sampled parameters
    out = {_params: deepcopy_where_possible(info_in[_params])}
    for p in remove.get(_params, {}):
        pinfo = info_in[_params].get(p)
        if pinfo is None or not is_derived_param(pinfo):
            raise LoggedError(
                log,
                "You tried to remove parameter '%s', which is not a derived parameter. "
                "Only derived parameters can be removed during post-processing.", p)
        out[_params].pop(p)
    # Force recomputation of aggregated chi2
    for p in list(out[_params]):
        if p.startswith(_get_chi2_name("")):
            out[_params].pop(p)
    mlprior_names_add = []
    for p, pinfo in add.get(_params, {}).items():
        pinfo_in = info_in[_params].get(p)
        if is_sampled_param(pinfo):
            if not is_sampled_param(pinfo_in):
                # No added sampled parameters (de-marginalisation not implemented)
                if pinfo_in is None:
                    raise LoggedError(
                        log, "You added a new sampled parameter %r (maybe accidentally "
                             "by adding a new likelihood that depends on it). "
                             "Adding new sampled parameters is not possible. Try fixing "
                             "it to some value.", p)
                else:
                    raise LoggedError(
                        log,
                        "You tried to change the prior of parameter '%s', "
                        "but it was not a sampled parameter. "
                        "To change that prior, you need to define as an external one.", p)
            if mlprior_names_add[:1] != [_minuslogprior + _separator + _prior_1d_name]:
                mlprior_names_add = ([_minuslogprior + _separator + _prior_1d_name]
                                     + mlprior_names_add)
        elif is_derived_param(pinfo):
            if p in out[_params]:
                raise LoggedError(
                    log, "You tried to add derived parameter '%s', which is already "
                         "present. To force its recomputation, 'remove' it too.", p)
        elif is_fixed_param(pinfo):
            # Only one possibility left "fixed" parameter that was not present before:
            # input of new likelihood, or just an argument for dynamical derived (dropped)
            if ((p in info_in[_params] and
                 pinfo[partag.value] != (pinfo_in or {}).get(partag.value, None))):
                raise LoggedError(
                    log,
                    "You tried to add a fixed parameter '%s: %r' that was already present"
                    " but had a different value or was not fixed. This is not allowed. "
                    "The old info of the parameter was '%s: %r'",
                    p, dict(pinfo), p, dict(pinfo_in))
        else:
            raise LoggedError(log, "This should not happen. Contact the developers.")
        out[_params][p] = pinfo
    # For the likelihood only, turn the rest of *derived* parameters into constants,
    # so that the likelihoods do not try to compute them.
    # But be careful to exclude *input* params that have a "derived: True" value
    # (which in "updated info" turns into "derived: 'lambda [x]: [x]'")
    out_params_like = deepcopy_where_possible(out[_params])
    for p, pinfo in out_params_like.items():
        if ((is_derived_param(pinfo) and not (partag.value in pinfo)
             and p not in add.get(_params, {}))):
            out_params_like[p] = {partag.value: np.nan, partag.drop: True}
    # 2.2 Manage adding/removing priors and likelihoods
    warn_remove = False
    for level in [_prior, kinds.likelihood]:
        out[level] = getattr(dummy_model_in, level)
        if level == _prior:
            out[level].remove(_prior_1d_name)
        for pdf in info_post.get(_post_remove, {}).get(level, []) or []:
            try:
                out[level].remove(pdf)
                warn_remove = True
            except ValueError:
                raise LoggedError(
                    log, "Trying to remove %s '%s', but it is not present. "
                         "Existing ones: %r", level, pdf, out[level])
    if warn_remove:
        log.warning("You are removing a prior or likelihood pdf. "
                    "Notice that if the resulting posterior is much wider "
                    "than the original one, or displaced enough, "
                    "it is probably safer to explore it directly.")
    if _prior in add:
        mlprior_names_add += [_minuslogprior + _separator + name for name in add[_prior]]
        out[_prior] += list(add[_prior])
    prior_recompute_1d = (
            mlprior_names_add[:1] == [_minuslogprior + _separator + _prior_1d_name])
    # Don't initialise the theory code if not adding/recomputing theory,
    # theory-derived params or likelihoods
    recompute_theory = info_in.get(kinds.theory) and not (
            list(add[kinds.likelihood]) == ["one"] and
            not any(is_derived_param(pinfo) for pinfo in add.get(_params, {}).values()))
    if recompute_theory:
        # Inherit from the original chain (needs input|output_params, renames, etc.)
        add_theory = add.get(kinds.theory)
        if add_theory:
            info_theory_out = {}
            if len(add_theory) > 1:
                log.warning('Importance sampling with more than one theory is '
                            'not really tested')
            add_theory = add_theory.copy()
            for theory, theory_info in info_in[kinds.theory].items():
                theory_copy = deepcopy_where_possible(theory_info)
                if theory in add_theory:
                    info_theory_out[theory] = \
                        recursive_update(theory_copy, add_theory.pop(theory))
                else:
                    info_theory_out[theory] = theory_copy
            info_theory_out.update(add_theory)
        else:
            info_theory_out = deepcopy_where_possible(info_in[kinds.theory])
    else:
        info_theory_out = None
    chi2_names_add = [
        _get_chi2_name(name) for name in add[kinds.likelihood] if name != "one"]
    out[kinds.likelihood] += [l for l in add[kinds.likelihood] if l != "one"]
    if recompute_theory:
        log.warning("You are recomputing the theory, but in the current version this does"
                    " not force recomputation of any likelihood or derived parameter, "
                    "unless explicitly removed+added.")
    for level in [_prior, kinds.likelihood]:
        for i, x_i in enumerate(out[level]):
            if x_i in list(out[level])[i + 1:]:
                raise LoggedError(
                    log, "You have added %s '%s', which was already present. If you "
                         "want to force its recomputation, you must also 'remove' it.",
                    level, x_i)
    # 3. Create output collection
    if _post_suffix not in info_post:
        raise LoggedError(log, "You need to provide a '%s' for your chains.",
                          _post_suffix)
    # Use default prefix if it exists. If it does not, produce no output by default.
    # {post: {output: None}} suppresses output, and if it's a string, updates it.
    out_prefix = info_post.get(_output_prefix, info.get(_output_prefix))
    if out_prefix not in [None, False]:
        out_prefix += _separator_files + _post + _separator_files + info_post[
            _post_suffix]
    output_out = get_output(output_prefix=out_prefix, force=info.get(_force))
    if output_out and not output_out.force and output_out.find_collections():
        raise LoggedError(log, "Found existing post-processing output with prefix %r. "
                               "Delete it manually or re-run with `force: True` "
                               "(or `-f`, `--force` from the shell).", out_prefix)
    elif output_out and output_out.force:
        output_out.delete_infos()
        for regexp in output_out.find_collections():
            output_out.delete_with_regexp(re.compile(regexp))
    info_out = deepcopy_where_possible(info)
    info_out[_post] = info_post
    # Updated with input info and extended (updated) add info
    info_out.update(info_in)
    info_out[_post][_post_add] = add
    dummy_model_out = DummyModel(out[_params], out[kinds.likelihood],
                                 info_prior=out[_prior])
    if recompute_theory:
        # TODO: May need updating for more than one, or maybe can be removed
        theory = list(info_theory_out)[0]
        if _input_params not in info_theory_out[theory]:
            raise LoggedError(
                log,
                "You appear to be post-processing a chain generated with an older "
                "version of Cobaya. For post-processing to work, please edit the "
                "'[root].updated.yaml' file of the original chain to add, inside the "
                "theory code block, the list of its input parameters. E.g.\n----\n"
                "theory:\n  %s:\n    input_params: [param1, param2, ...]\n"
                "----\nIf you get strange errors later, it is likely that you did not "
                "specify the correct set of theory parameters.\n"
                "The full set of input parameters are %s.",
                theory, list(dummy_model_out.parameterization.input_params()))
    # TODO: check allow_renames=False?
    # TODO: There may well be simplifications possible here; this is very close to the
    # pre-refactor logic, and the parameterization handling has not been fully reviewed.
    model_add = Model(out_params_like, add[kinds.likelihood], info_prior=add.get(_prior),
                      info_theory=info_theory_out, packages_path=info.get(_packages_path),
                      allow_renames=False, post=True,
                      prior_parameterization=dummy_model_out.parameterization)
    # Remove auxiliary "one" before dumping -- 'add' *is* info_out[_post][_post_add]
    add[kinds.likelihood].pop("one")
    collection_out = Collection(dummy_model_out, output_out, name="1")
    output_out.check_and_dump_info(None, info_out, check_compatible=False)
    # Prepare recomputation of aggregated chi2
    # (they need to be recomputed by hand, because their automatic computation won't
    #  pick up old likelihoods for a given type)
    all_types = {
        like: str_to_list(add[kinds.likelihood].get(
            like, info_in[kinds.likelihood].get(like)).get("type", []) or [])
        for like in out[kinds.likelihood]}
    types = set(chain(*list(all_types.values())))
    inv_types = {t: [like for like, like_types in all_types.items() if t in like_types]
                 for t in types}
    # 4. Main loop!
    log.info("Running post-processing...")
    last_percent = 0
    for i, point in collection_in.data.iterrows():
        log.debug("Point: %r", point)
        sampled = [point[param] for param in
                   dummy_model_in.parameterization.sampled_params()]
        derived = {param: point.get(param, None)
                   for param in dummy_model_out.parameterization.derived_params()}
        inputs = {param: point.get(
            param, dummy_model_in.parameterization.constant_params().get(
                param, dummy_model_out.parameterization.constant_params().get(
                    param, None)))
            for param in dummy_model_out.parameterization.input_params()}
        # Solve inputs that depend on a function and were not saved
        # (we don't use the Parameterization_to_input method in case there are references
        #  to functions that cannot be loaded at the moment)
        for p, value in inputs.items():
            if value is None:
                func = dummy_model_out.parameterization._input_funcs[p]
                args = dummy_model_out.parameterization._input_args[p]
                inputs[p] = func(*[point.get(arg) for arg in args])
        # Add/remove priors
        priors_add = model_add.prior.logps(sampled)
        if not prior_recompute_1d:
            priors_add = priors_add[1:]
        logpriors_add = dict(zip(mlprior_names_add, priors_add))
        logpriors_new = [logpriors_add.get(name, - point.get(name, 0))
                         for name in collection_out.minuslogprior_names]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug(
                "New set of priors: %r", dict(zip(dummy_model_out.prior, logpriors_new)))
        if -np.inf in logpriors_new:
            continue
        # Add/remove likelihoods
        output_like = []
        if add[kinds.likelihood]:
            # Notice "one" (last in likelihood_add) is ignored: not in chi2_names
            loglikes_add, output_like = model_add.logps(inputs, return_derived=True)
            loglikes_add = dict(zip(chi2_names_add, loglikes_add))
            output_like = dict(zip(model_add.output_params, output_like))
        else:
            loglikes_add = dict()
        loglikes_new = [loglikes_add.get(name, -0.5 * point.get(name, 0))
                        for name in collection_out.chi2_names]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug(
                "New set of likelihoods: %r",
                dict(zip(dummy_model_out.likelihood, loglikes_new)))
            if output_like:
                log.debug("New set of likelihood-derived parameters: %r", output_like)
        if -np.inf in loglikes_new:
            continue
        # Add/remove derived parameters and change priors of sampled parameters
        for p in add[_params]:
            if p in dummy_model_out.parameterization._directly_output:
                derived[p] = output_like[p]
            elif p in dummy_model_out.parameterization._derived_funcs:
                func = dummy_model_out.parameterization._derived_funcs[p]
                args = dummy_model_out.parameterization._derived_args[p]
                derived[p] = func(
                    *[point.get(arg, output_like.get(arg, None)) for arg in args])
        # We need to recompute the aggregated chi2 by hand
        for type_, likes in inv_types.items():
            derived[_get_chi2_name(type_)] = sum(
                [-2 * lvalue for lname, lvalue
                 in zip(collection_out.chi2_names, loglikes_new)
                 if _undo_chi2_name(lname) in likes])
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New derived parameters: %r",
                      dict([(p, derived[p])
                            for p in dummy_model_out.parameterization.derived_params()
                            if p in add[_params]]))
        # Save to the collection (keep old weight for now)
        collection_out.add(
            sampled, derived=derived.values(), weight=point.get(_weight),
            logpriors=logpriors_new, loglikes=loglikes_new)
        # Display progress
        percent = np.round(i / len(collection_in) * 100)
        if percent != last_percent and not percent % 5:
            last_percent = percent
            progress_bar(log, percent, " (%d/%d)" % (i, len(collection_in)))
    if collection_out.data.last_valid_index() is None:
        raise LoggedError(
            log, "No elements in the final sample. Possible causes: "
                 "added a prior or likelihood valued zero over the full sampled domain, "
                 "or the computation of the theory failed everywhere, etc.")
    # Reweight -- account for large dynamic range!
    #   Prefer to rescale +inf to finite, and ignore final points with -inf.
    #   Remove -inf's (0-weight), and correct indices
    difflogmax = max(collection_in[_minuslogpost] - collection_out[_minuslogpost])
    collection_out.data[_weight] *= np.exp(
        collection_in[_minuslogpost] - collection_out[_minuslogpost] - difflogmax)
    collection_out.data = (
        collection_out.data[collection_out.data.weight > 0].reset_index(drop=True))
    collection_out._n = collection_out.data.last_valid_index() + 1
    # Write!
    collection_out.out_update()
    log.info("Finished! Final number of samples: %d", len(collection_out))
    return info_out, {"sample": collection_out}
Example 19
def run_script(args=None):
    warn_deprecation()
    import argparse
    parser = argparse.ArgumentParser(prog="cobaya run",
                                     description="Cobaya's run script.")
    parser.add_argument("input_file",
                        action="store",
                        metavar="input_file.yaml",
                        help="An input file to run.")
    parser.add_argument("-" + packages_path_arg[0],
                        "--" + packages_path_arg_posix,
                        action="store",
                        metavar="/packages/path",
                        default=None,
                        help="Path where external packages were installed.")
    # MARKED FOR DEPRECATION IN v3.0
    modules = "modules"
    parser.add_argument("-" + modules[0],
                        "--" + modules,
                        action="store",
                        required=False,
                        metavar="/packages/path",
                        default=None,
                        help="To be deprecated! "
                        "Alias for %s, which should be used instead." %
                        packages_path_arg_posix)
    # END OF DEPRECATION BLOCK -- CONTINUES BELOW!
    parser.add_argument("-" + "o",
                        "--" + "output",
                        action="store",
                        metavar="/some/path",
                        default=None,
                        help="Path and prefix for the text output.")
    parser.add_argument("-" + "d",
                        "--" + "debug",
                        action="store_true",
                        help="Produce verbose debug output.")
    continuation = parser.add_mutually_exclusive_group(required=False)
    continuation.add_argument(
        "-" + "r",
        "--" + "resume",
        action="store_true",
        help="Resume an existing chain if it has similar info "
        "(fails otherwise).")
    continuation.add_argument("-" + "f",
                              "--" + "force",
                              action="store_true",
                              help="Overwrites previous output, if it exists "
                              "(use with care!)")
    parser.add_argument("--%s" % "test",
                        action="store_true",
                        help="Initialize model and sampler, and exit.")
    parser.add_argument("--version", action="version", version=get_version())
    parser.add_argument("--no-mpi",
                        action='store_true',
                        help="disable MPI when mpi4py installed but MPI does "
                        "not actually work")
    arguments = parser.parse_args(args)

    # MARKED FOR DEPRECATION IN v3.0
    if arguments.modules is not None:
        logger_setup()
        logger = get_logger("run")
        logger.warning(
            "*DEPRECATION*: -m/--modules will be deprecated in favor of "
            "-%s/--%s in the next version. Please, use that one instead.",
            packages_path_arg[0], packages_path_arg_posix)
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        if getattr(arguments, packages_path_arg) is None:
            setattr(arguments, packages_path_arg, arguments.modules)
    del arguments.modules
    # END OF DEPRECATION BLOCK
    info = arguments.input_file
    del arguments.input_file
    run(info, **arguments.__dict__)
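
Because the parser accepts an explicit args list, the script can also be driven programmatically; a hedged sketch (file name and output prefix are placeholders):

# Equivalent to the shell command: cobaya run input_file.yaml -o chains/test -f
run_script(["input_file.yaml", "--output", "chains/test", "--force"])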
Example 20
def install_script():
    from cobaya.mpi import am_single_or_primary_process
    if am_single_or_primary_process():
        # Configure the logger ASAP
        logger_setup()
        log = logging.getLogger(__name__.split(".")[-1])
        # Parse arguments
        import argparse
        parser = argparse.ArgumentParser(
            description="Cobaya's installation tool for external modules.")
        parser.add_argument(
            "files",
            action="store",
            nargs="+",
            metavar="input_file.yaml",
            help="One or more input files "
            "(or 'cosmo' for a basic collection of cosmological modules)")
        parser.add_argument(
            "-" + _modules_path_arg[0],
            "--" + _modules_path_arg,
            action="store",
            nargs=1,
            required=True,
            metavar="/install/path",
            help="Desired path where to install external modules.")
        parser.add_argument(
            "-f",
            "--force",
            action="store_true",
            default=False,
            help="Force re-installation of apparently installed modules.")
        parser.add_argument(
            "--no-progress-bars",
            action="store_true",
            default=False,
            help="No progress bars shown. Shorter logs (used in Travis).")
        group_just = parser.add_mutually_exclusive_group(required=False)
        group_just.add_argument("-C",
                                "--just-code",
                                action="store_false",
                                default=True,
                                help="Install code of the modules.",
                                dest=_data)
        group_just.add_argument("-D",
                                "--just-data",
                                action="store_false",
                                default=True,
                                help="Install data of the modules.",
                                dest=_code)
        arguments = parser.parse_args()
        if arguments.files == ["cosmo"]:
            log.info(
                "Installing cosmological modules (input files will be ignored")
            from cobaya.cosmo_input import install_basic
            infos = [install_basic]
        else:
            from cobaya.input import load_input
            infos = [load_input(f) for f in arguments.files]
        # Launch installer
        install(*infos,
                path=getattr(arguments, _modules_path_arg)[0],
                **{
                    arg: getattr(arguments, arg)
                    for arg in ["force", _code, _data, "no_progress_bars"]
                })
Example 21
def run(info):
    # This function reproduces the model-->output-->sampler pipeline one would follow
    # when instantiating by hand, but alters the order to performs checks and dump info
    # as early as possible, e.g. to check if resuming possible or `force` needed.
    assert isinstance(info, Mapping), (
        "The first argument must be a dictionary with the info needed for the run. "
        "If you were trying to pass the name of an input file instead, "
        "load it first with 'cobaya.input.load_input', "
        "or, if you were passing a yaml string, load it with 'cobaya.yaml.yaml_load'."
    )
    logger_setup(info.get(_debug), info.get(_debug_file))
    logger_run = logging.getLogger(__name__.split(".")[-1])
    # MARKED FOR DEPRECATION IN v3.0
    # BEHAVIOUR TO BE REPLACED BY ERROR:
    check_deprecated_modules_path(info)
    # END OF DEPRECATION BLOCK
    # 1. Prepare output driver, if requested by defining an output_prefix
    output = get_output(output_prefix=info.get(_output_prefix),
                        resume=info.get(_resume),
                        force=info.get(_force))
    # 2. Update the input info with the defaults for each component
    updated_info = update_info(info)
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Dump only if not doing output (otherwise, the user can check the .updated file)
        if not output and is_main_process():
            logger_run.info(
                "Input info updated with defaults (dumped to YAML):\n%s",
                yaml_dump(sort_cosmetic(updated_info)))
    # 3. If output requested, check compatibility if existing one, and dump.
    # 3.1 First: model only
    output.check_and_dump_info(info,
                               updated_info,
                               cache_old=True,
                               ignore_blocks=[kinds.sampler])
    # 3.2 Then sampler -- 1st get the last sampler mentioned in the updated.yaml
    # TODO: ideally, using Minimizer would *append* to the sampler block.
    #       Some code already in place, but not possible at the moment.
    try:
        last_sampler = list(updated_info[kinds.sampler])[-1]
        last_sampler_info = {
            last_sampler: updated_info[kinds.sampler][last_sampler]
        }
    except (KeyError, TypeError):
        raise LoggedError(logger_run, "No sampler requested.")
    sampler_name, sampler_class = get_sampler_name_and_class(last_sampler_info)
    check_sampler_info((output.reload_updated_info(use_cache=True)
                        or {}).get(kinds.sampler),
                       updated_info[kinds.sampler],
                       is_resuming=output.is_resuming())
    # Dump again, now including sampler info
    output.check_and_dump_info(info, updated_info, check_compatible=False)
    # Check if resumable run
    sampler_class.check_force_resume(
        output, info=updated_info[kinds.sampler][sampler_name])
    # 4. Initialize the posterior and the sampler
    with Model(updated_info[_params], updated_info[kinds.likelihood],
               updated_info.get(_prior), updated_info.get(kinds.theory),
               packages_path=info.get(_packages_path), timing=updated_info.get(_timing),
               allow_renames=False, stop_at_error=info.get("stop_at_error", False)) \
            as model:
        # Re-dump the updated info, now containing parameter routes and version info
        updated_info = recursive_update(updated_info, model.info())
        output.check_and_dump_info(None, updated_info, check_compatible=False)
        sampler = sampler_class(updated_info[kinds.sampler][sampler_name],
                                model,
                                output,
                                packages_path=info.get(_packages_path))
        # Re-dump updated info, now also containing updates from the sampler
        updated_info[kinds.sampler][sampler.get_name()] = \
            recursive_update(
                updated_info[kinds.sampler][sampler.get_name()], sampler.info())
        # TODO -- maybe also re-dump model info, now possibly with measured speeds
        # (waiting until the camb.transfers issue is solved)
        output.check_and_dump_info(None, updated_info, check_compatible=False)
        if info.get(_test_run, False):
            logger_run.info(
                "Test initialization successful! "
                "You can probably run now without `--%s`.", _test_run)
            return updated_info, sampler
        # Run the sampler
        sampler.run()
    return updated_info, sampler
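
A minimal sketch of the info mapping this function expects, assuming the underscore constants map to the plain keys 'params', 'likelihood' and 'sampler', and using a toy external likelihood:

# Hypothetical minimal run: one flat-prior parameter and a unit-normal likelihood.
info = {
    "params": {"x": {"prior": {"min": -5, "max": 5}}},
    "likelihood": {"gaussian": {"external": "lambda x: -0.5 * x ** 2"}},
    "sampler": {"mcmc": None},
}
updated_info, sampler = run(info)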
Example 22
def doc_script(args=None):
    """Command line script for the documentation."""
    warn_deprecation()
    logger_setup()
    logger = get_logger("doc")
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser(
        prog="cobaya doc",
        description="Prints defaults for Cobaya's components.")
    parser.add_argument(
        "component",
        action="store",
        nargs="?",
        default="",
        metavar="component_name",
        help=("The component whose defaults are requested. "
              "Pass a component kind (sampler, theory, likelihood) to "
              "list all available (internal) ones, pass nothing to list "
              "all available (internal) components of all kinds."))
    parser.add_argument("-p",
                        "--python",
                        action="store_true",
                        default=False,
                        help="Request Python instead of YAML.")
    expand_flag, expand_flag_ishort = "expand", 1
    parser.add_argument("-" + expand_flag[expand_flag_ishort],
                        "--" + expand_flag,
                        action="store_true",
                        default=False,
                        help="Expand YAML defaults.")
    arguments = parser.parse_args(args)
    # Nothing passed: list all
    if not arguments.component:
        msg = "Available components: (some may need external code/data)"
        print(msg + "\n" + "-" * len(msg))
        for kind in kinds:
            print("%s:" % kind)
            print(_indent +
                  ("\n" +
                   _indent).join(get_available_internal_class_names(kind)))
        return
    # A kind passed (plural or singular): list all of that kind
    if arguments.component.lower() in subfolders.values():
        arguments.component = next(k for k in subfolders
                                   if arguments.component == subfolders[k])
    if arguments.component.lower() in kinds:
        print("%s:" % arguments.component.lower())
        print(_indent + ("\n" + _indent).join(
            get_available_internal_class_names(arguments.component.lower())))
        return
    # Otherwise, try to identify the component
    try:
        cls = get_component_class(arguments.component, logger=logger)
    except ComponentNotFoundError:
        suggestions = similar_internal_class_names(arguments.component)
        logger.error(
            f"Could not identify component '{arguments.component}'. "
            f"Did you mean any of the following? {suggestions} (mind capitalization!)"
        )
        return 1
    to_print = get_default_info(cls,
                                return_yaml=not arguments.python,
                                yaml_expand_defaults=arguments.expand)
    if arguments.python:
        print(pformat({cls.get_kind(): {arguments.component: to_print}}))
    else:
        print(cls.get_kind() + ":\n" + _indent + arguments.component + ":\n" +
              2 * _indent + ("\n" + 2 * _indent).join(to_print.split("\n")))
        if "!defaults" in to_print:
            print("# This file contains defaults. "
                  "To populate them, use the flag --%s (or -%s)." %
                  (expand_flag, expand_flag[expand_flag_ishort]))
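
Like the other scripts, this one takes an optional args list, so it can be called directly; a couple of hedged examples using a known internal sampler name:

# Equivalent to: cobaya doc mcmc --expand
doc_script(["mcmc", "--expand"])
# List all internal components of one kind:
doc_script(["sampler"])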
Example 23
config = __import__('_config')


def process_modules_path(modules):
    if not modules:
        if os.path.exists(os.path.join(os.getcwd(), '..', 'modules')):
            modules = os.path.join('..', 'modules')
    assert modules, "I need a modules folder!"
    return modules if os.path.isabs(modules) else os.path.join(
        os.getcwd(), modules)


if __name__ == "__main__":
    from cobaya.log import logger_setup

    logger_setup()
    from cobaya.conventions import _likelihood, _theory, _sampler
    import sys

    info_install = {
        _sampler: {
            "polychord": None
        },
        _theory: {
            "camb": None,
            "classy": None
        },
        _likelihood: {
            "planck_2015_lowl": None,
            "planck_2015_plikHM_TT": None,
            "planck_2015_lowTEB": None,
Example 24
def install(*infos, **kwargs):
    debug = kwargs.get(_debug)
    if not log.root.handlers:
        logger_setup()
    path = kwargs.get("path")
    if not path:
        path = resolve_packages_path(infos)
    if not path:
        raise LoggedError(
            log, "No 'path' argument given, and none could be found in input infos "
                 "(as %r), the %r env variable or the config file. "
                 "Maybe specify one via a command line argument '-%s [...]'?",
            _packages_path, _packages_path_env, _packages_path_arg[0])
    abspath = os.path.abspath(path)
    log.info("Installing external packages at '%s'", abspath)
    kwargs_install = {"force": kwargs.get("force", False),
                      "no_progress_bars": kwargs.get("no_progress_bars")}
    for what in (_code, _data):
        kwargs_install[what] = kwargs.get(what, True)
        spath = os.path.join(abspath, what)
        if kwargs_install[what] and not os.path.exists(spath):
            try:
                os.makedirs(spath)
            except OSError:
                raise LoggedError(
                    log, "Could not create the desired installation folder '%s'", spath)
    failed_components = []
    skip_keywords_arg = set(kwargs.get("skip", []) or [])
    # NB: if passed with quotes as `--skip "a b"`, it's interpreted as a single key
    skip_keywords_arg = set(chain(*[word.split() for word in skip_keywords_arg]))
    skip_keywords_env = set(
        os.environ.get(_install_skip_env, "").replace(",", " ").lower().split())
    skip_keywords = skip_keywords_arg.union(skip_keywords_env)
    for kind, components in get_used_components(*infos).items():
        for component in components:
            print()
            print(create_banner(kind + ":" + component,
                                symbol=_banner_symbol, length=_banner_length), end="")
            print()
            if _skip_helper(component.lower(), skip_keywords, skip_keywords_env, log):
                continue
            info = (next(info for info in infos if component in
                         info.get(kind, {}))[kind][component]) or {}
            if isinstance(info, str) or _external in info:
                log.warning("Component '%s' is a custom function. "
                            "Nothing to do.", component)
                continue
            try:
                imported_class = get_class(component, kind,
                                           component_path=info.pop(_component_path, None))
            except ImportError as excpt:
                log.error("Component '%s' not recognized. [%s].", component, excpt)
                failed_components += ["%s:%s" % (kind, component)]
                continue
            else:
                if _skip_helper(imported_class.__name__.lower(), skip_keywords,
                                skip_keywords_env, log):
                    continue
            is_compatible = getattr(imported_class, "is_compatible", lambda: True)()
            if not is_compatible:
                log.info(
                    "Skipping %r because it is not compatible with your OS.", component)
                continue
            log.info("Checking if dependencies have already been installed...")
            is_installed = getattr(imported_class, "is_installed", None)
            if is_installed is None:
                log.info("%s.%s is a fully built-in component: nothing to do.",
                         kind, imported_class.__name__)
                continue
            install_path = abspath
            get_path = getattr(imported_class, "get_path", None)
            if get_path:
                install_path = get_path(install_path)
            has_been_installed = False
            if not debug:
                logging.disable(logging.ERROR)
            if kwargs.get("skip_global"):
                has_been_installed = is_installed(path="global", **kwargs_install)
            if not has_been_installed:
                has_been_installed = is_installed(path=install_path, **kwargs_install)
            if not debug:
                logging.disable(logging.NOTSET)
            if has_been_installed:
                log.info("External dependencies for this component already installed.")
                if kwargs.get(_test_run, False):
                    continue
                if kwargs_install["force"] and not kwargs.get("skip_global"):
                    log.info("Forcing re-installation, as requested.")
                else:
                    log.info("Doing nothing.")
                    continue
            else:
                log.info("Installation check failed!")
                if not debug:
                    log.info(
                        "(If you expected this to be already installed, re-run "
                        "`cobaya-install` with --debug to get more verbose output.)")
                if kwargs.get(_test_run, False):
                    continue
                log.info("Installing...")
            try:
                install_this = getattr(imported_class, "install", None)
                success = install_this(path=abspath, **kwargs_install)
            except KeyboardInterrupt:
                raise
            except Exception:
                traceback.print_exception(*sys.exc_info(), file=sys.stdout)
                log.error("An unknown error occurred. Delete the external packages "
                          "folder %r and try again. "
                          "Please, notify the developers if this error persists.",
                          abspath)
                success = False
            if success:
                log.info("Successfully installed! Let's check it...")
            else:
                log.error("Installation failed! Look at the error messages above. "
                          "Solve them and try again, or, if you are unable to solve, "
                          "install the packages required by this component manually.")
                failed_components += ["%s:%s" % (kind, component)]
                continue
            # test installation
            if not debug:
                logging.disable(logging.ERROR)
            successfully_installed = is_installed(path=install_path, **kwargs_install)
            if not debug:
                logging.disable(logging.NOTSET)
            if not successfully_installed:
                log.error("Installation apparently worked, "
                          "but the subsequent installation test failed! "
                          "Look at the error messages above, or re-run with --debug "
                          "for more more verbose output. "
                          "Try to solve the issues and try again, or, if you are unable "
                          "to solve them, install the packages required by this "
                          "component manually.")
                failed_components += ["%s:%s" % (kind, component)]
            else:
                log.info("Installation check successful.")
    print()
    print(create_banner(" * Summary * ",
                        symbol=_banner_symbol, length=_banner_length), end="")
    print()
    if failed_components:
        bullet = "\n - "
        raise LoggedError(
            log, "The installation (or installation test) of some component(s) has "
                 "failed: %s\nCheck output of the installer of each component above "
                 "for precise error info.\n",
            bullet + bullet.join(failed_components))
    log.info("All requested components' dependencies correctly installed.")
    # Set the installation path in the global config file
    if not kwargs.get("no_set_global", False) and not kwargs.get(_test_run, False):
        write_packages_path_in_config_file(abspath)
        log.info("The installation path has been written into the global config file: %s",
                 os.path.join(get_config_path(), _packages_path_config_file))
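
A hedged sketch of calling this installer directly with an in-memory info dict; the path and the skip keyword are placeholders, and 'camb' is used as a representative theory component:

# Install (or just check, with test=True) the requisites of one info dict,
# skipping any component whose name matches the keyword 'planck':
info = {"theory": {"camb": None}}
install(info, path="/path/to/packages", skip=["planck"], no_progress_bars=True)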
Example 25
def install(*infos, **kwargs):
    if not log.root.handlers:
        logger_setup()
    path = kwargs.get("path", ".")
    if not path:
        # See if we can get one (and only one) from infos
        paths = set(
            [p for p in [info.get(_path_install) for info in infos] if p])
        if len(paths) == 1:
            path = paths[0]
        else:
            print("logging?")
            log.error(
                "No 'path' argument given and could not extract one (and only one) "
                "from the infos.")
            raise HandledException
    abspath = os.path.abspath(path)
    log.info("Installing modules at '%s'\n", abspath)
    kwargs_install = {
        "force": kwargs.get("force", False),
        "no_progress_bars": kwargs.get("no_progress_bars")
    }
    for what in (_code, _data):
        kwargs_install[what] = kwargs.get(what, True)
        spath = os.path.join(abspath, what)
        if kwargs_install[what] and not os.path.exists(spath):
            try:
                os.makedirs(spath)
            except OSError:
                log.error(
                    "Could not create the desired installation folder '%s'",
                    spath)
                raise HandledException
    failed_modules = []
    for kind, modules in get_modules(*infos).items():
        for module in modules:
            print(make_header(kind, module))
            module_folder = get_folder(module, kind, sep=".", absolute=False)
            try:
                imported_module = import_module(module_folder,
                                                package=_package)
            except ImportError:
                if kind == _likelihood:
                    info = (next(info for info in infos if module in info.get(
                        _likelihood, {}))[_likelihood][module]) or {}
                    if isinstance(info, string_types) or _external in info:
                        log.warning(
                            "Module '%s' is a custom likelihood. "
                            "Nothing to do.\n", module)
                    else:
                        log.error("Module '%s' not recognized.\n" % module)
                        failed_modules += ["%s:%s" % (kind, module)]
                continue
            is_installed = getattr(imported_module, "is_installed", None)
            if is_installed is None:
                log.info("Built-in module: nothing to do.\n")
                continue
            if is_installed(path=abspath, **kwargs_install):
                log.info("External module already installed.\n")
                if kwargs_install["force"]:
                    log.info("Forcing re-installation, as requested.")
                else:
                    log.info("Doing nothing.\n")
                    continue
            try:
                success = imported_module.install(path=abspath,
                                                  **kwargs_install)
            except:
                traceback.print_exception(*sys.exc_info(), file=sys.stdout)
                log.error(
                    "An unknown error occurred. Delete the modules folder and try "
                    "again. Notify the developers if this error persists.")
                success = False
            if success:
                log.info("Successfully installed!\n")
            else:
                log.error(
                    "Installation failed! Look at the error messages above. "
                    "Solve them and try again, or, if you are unable to solve, "
                    "install this module manually.")
                failed_modules += ["%s:%s" % (kind, module)]
                continue
            # test installation
            if not is_installed(path=abspath, **kwargs_install):
                log.error(
                    "Installation apparently worked, "
                    "but the subsequent installation test failed! "
                    "Look at the error messages above. "
                    "Solve them and try again, or, if you are unable to solve, "
                    "install this module manually.")
                failed_modules += ["%s:%s" % (kind, module)]
    if failed_modules:
        bullet = "\n - "
        log.error(
            "The installation (or installation test) of some module(s) has failed: "
            "%s\nCheck output of the installer of each module above "
            "for precise error info.\n", bullet + bullet.join(failed_modules))
        raise HandledException
Example 26
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             read_only=False,
             interactive=False,
             install_reqs_at=None,
             install_reqs_force=None):
    print("Generating grid...")
    batchPath = os.path.abspath(batchPath) + os.sep
    if not settings:
        if not settingName:
            raise NotImplementedError(
                "Re-using previous batch is work in progress...")
        #            if not pathIsGrid(batchPath):
        #                raise Exception('Need to give name of setting file if batchPath/config '
        #                                'does not exist')
        #            read_only = True
        #            sys.path.insert(0, batchPath + 'config')
        #            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        elif os.path.splitext(settingName)[-1].lower() in _yaml_extensions:
            settings = yaml_load_file(settingName)
        else:
            raise NotImplementedError(
                "Using a python script is work in progress...")
            # In this case, info-as-dict would be passed
            # settings = __import__(settingName, fromlist=['dummy'])
    batch = batchjob.BatchJob(batchPath)
    # batch.skip = settings.get("skip", False)
    batch.makeItems(settings, messages=not read_only)
    if read_only:
        for jobItem in [b for b in batch.jobItems]:
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' %
              (len(batch.jobItems)))
        return batch
    else:
        batch.makeDirectories(setting_file=None)
        batch.save()
    infos = {}
    components_used = {}
    # Default info
    defaults = copy.deepcopy(settings)
    grid_definition = defaults.pop("grid")
    models_definitions = grid_definition["models"]
    datasets_definitions = grid_definition["datasets"]
    for jobItem in batch.items(wantSubItems=False):
        # Model info
        jobItem.makeChainPath()
        try:
            model_info = copy.deepcopy(models_definitions[jobItem.param_set]
                                       or {})
        except KeyError:
            raise ValueError("Model '%s' must be defined." % jobItem.param_set)
        model_info = merge_info(defaults, model_info)
        # Dataset info
        try:
            dataset_info = copy.deepcopy(
                datasets_definitions[jobItem.data_set.tag])
        except KeyError:
            raise ValueError("Data set '%s' must be defined." %
                             jobItem.data_set.tag)
        # Combined info
        combined_info = merge_info(defaults, model_info, dataset_info)
        if "preset" in combined_info:
            preset = combined_info.pop("preset")
            combined_info = merge_info(create_input(**preset), combined_info)
        combined_info[_output_prefix] = jobItem.chainRoot
        # Requisites
        components_used = get_used_components(components_used, combined_info)
        if install_reqs_at:
            combined_info[_packages_path] = os.path.abspath(install_reqs_at)
        # Save the info (we will write it after installation:
        # we need to install to add auto covmats)
        if jobItem.param_set not in infos:
            infos[jobItem.param_set] = {}
        infos[jobItem.param_set][jobItem.data_set.tag] = combined_info
    # Installing requisites
    if install_reqs_at:
        print("Installing required code and data for the grid.")
        from cobaya.log import logger_setup
        logger_setup()
        install_reqs(components_used,
                     path=install_reqs_at,
                     force=install_reqs_force)
    print("Adding covmats (if necessary) and writing input files")
    for jobItem in batch.items(wantSubItems=False):
        info = infos[jobItem.param_set][jobItem.data_set.tag]
        # Covariance matrices
        # We try to find them now, instead of at run time, to check if correctly selected
        try:
            sampler = list(info[kinds.sampler])[0]
        except KeyError:
            raise ValueError("No sampler has been chosen")
        if sampler == "mcmc" and info[kinds.sampler][sampler].get(
                "covmat", "auto"):
            packages_path = install_reqs_at or info.get(_packages_path, None)
            if not packages_path:
                raise ValueError(
                    "Cannot assign automatic covariance matrices because no "
                    "external packages path has been defined.")
            # Need updated info for covmats: includes renames
            updated_info = update_info(info)
            # Ideally, we use slow+sampled parameters to look for the covariance matrix
            # but since for that we'd need to initialise a model, we approximate that set
            # as theory+sampled
            from itertools import chain
            like_params = set(
                chain(*[
                    list(like[_params])
                    for like in updated_info[kinds.likelihood].values()
                ]))
            params_info = {
                p: v
                for p, v in updated_info[_params].items()
                if is_sampled_param(v) and p not in like_params
            }
            best_covmat = _get_best_covmat(os.path.abspath(packages_path),
                                           params_info,
                                           updated_info[kinds.likelihood])
            info[kinds.sampler][sampler]["covmat"] = os.path.join(
                best_covmat["folder"], best_covmat["name"])
        # Write the info for this job
        # Allow overwrite since often will want to regenerate grid with tweaks
        yaml_dump_file(jobItem.iniFile(),
                       sort_cosmetic(info),
                       error_if_exists=False)

        # Non-translated old code
        # if not start_at_bestfit:
        #     setMinimize(jobItem, ini)
        #     variant = '_minimize'
        #     ini.saveFile(jobItem.iniFile(variant))
        ## NOT IMPLEMENTED: start at best fit
        ##        ini.params['start_at_bestfit'] = start_at_bestfit
        # ---
        # for deffile in settings.defaults:
        #    ini.defaults.append(batch.commonPath + deffile)
        # if hasattr(settings, 'override_defaults'):
        #    ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults
        # ---
        # # add ini files for importance sampling runs
        # for imp in jobItem.importanceJobs():
        #     if getattr(imp, 'importanceFilter', None): continue
        #     if batch.hasName(imp.name.replace('_post', '')):
        #         raise Exception('importance sampling something you already have?')
        #     for minimize in (False, True):
        #         if minimize and not getattr(imp, 'want_minimize', True): continue
        #         ini = IniFile()
        #         updateIniParams(ini, imp.importanceSettings, batch.commonPath)
        #         if cosmomcAction == 0 and not minimize:
        #             for deffile in settings.importanceDefaults:
        #                 ini.defaults.append(batch.commonPath + deffile)
        #             ini.params['redo_outroot'] = imp.chainRoot
        #             ini.params['action'] = 1
        #         else:
        #             ini.params['file_root'] = imp.chainRoot
        #         if minimize:
        #             setMinimize(jobItem, ini)
        #             variant = '_minimize'
        #         else:
        #             variant = ''
        #         ini.defaults.append(jobItem.iniFile())
        #         ini.saveFile(imp.iniFile(variant))
        #         if cosmomcAction != 0: break

    if not interactive:
        return batch
    print('Done... to run do: cobaya-grid-run %s' % batchPath)
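
A hedged usage sketch, assuming a YAML settings file containing the 'grid' block (with 'models' and 'datasets' definitions) that the function reads above; all paths are placeholders:

# Create a grid of runs under 'grids/my_batch' and install its requisites:
batch = makeGrid("grids/my_batch",
                 settingName="grid_settings.yaml",
                 install_reqs_at="/path/to/packages")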
Example 27
def run(
    info_or_yaml_or_file: Union[InputDict, str, os.PathLike],
    packages_path: Optional[str] = None,
    output: Union[str, LiteralFalse, None] = None,
    debug: Union[bool, int, None] = None,
    stop_at_error: Optional[bool] = None,
    resume: bool = False,
    force: bool = False,
    no_mpi: bool = False,
    test: bool = False,
    override: Optional[InputDict] = None,
) -> Union[InfoSamplerTuple, PostTuple]:
    """
    Run from an input dictionary, file name or yaml string, with optional arguments
    to override settings in the input as needed.

    :param info_or_yaml_or_file: input options dictionary, yaml file, or yaml text
    :param packages_path: path where external packages were installed
    :param output: path name prefix for output files, or False for no file output
    :param debug: true for verbose debug output, or a specific logging level
    :param stop_at_error: stop if an error is raised
    :param resume: continue an existing run
    :param force: overwrite existing output if it exists
    :param no_mpi: run without MPI
    :param test: only test initialization rather than actually running
    :param override: option dictionary to merge into the input one, overriding settings
       (but with lower precedence than the explicit keyword arguments)
    :return: (updated_info, sampler) tuple of options dictionary and Sampler instance,
              or (updated_info, results) if using "post" post-processing
    """

    # This function reproduces the model-->output-->sampler pipeline one would follow
    # when instantiating by hand, but alters the order so as to perform checks and
    # dump info as early as possible, e.g. to check whether resuming is possible or
    # `force` is needed.
    if no_mpi or test:
        mpi.set_mpi_disabled()

    with mpi.ProcessState("run"):
        info: InputDict = load_info_overrides(info_or_yaml_or_file, debug,
                                              stop_at_error, packages_path,
                                              override)

        if test:
            info["test"] = True
        # If any of resume|force given as cmd args, ignore those in the input file
        if resume or force:
            if resume and force:
                raise ValueError("'resume' and 'force' are exclusive options")
            info["resume"] = bool(resume)
            info["force"] = bool(force)
        if info.get("post"):
            if isinstance(output, str) or output is False:
                info["post"]["output"] = output or None
            return post(info)

        if isinstance(output, str) or output is False:
            info["output"] = output or None
        logger_setup(info.get("debug"), info.get("debug_file"))
        logger_run = get_logger(run.__name__)
        # MARKED FOR DEPRECATION IN v3.0
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        check_deprecated_modules_path(info)
        # END OF DEPRECATION BLOCK
        # 1. Prepare output driver, if requested by defining an output_prefix
        # GetDist needs to know the original sampler, so don't overwrite if minimizer
        try:
            which_sampler = list(info["sampler"])[0]
        except (KeyError, TypeError):
            raise LoggedError(
                logger_run,
                "You need to specify a sampler using the 'sampler' key "
                "as e.g. `sampler: {mcmc: None}`.")
        infix = "minimize" if which_sampler == "minimize" else None
        with get_output(prefix=info.get("output"),
                        resume=info.get("resume"),
                        force=info.get("force"),
                        infix=infix) as out:
            # 2. Update the input info with the defaults for each component
            updated_info = update_info(info)
            if is_debug(logger_run):
                # Dump only if not doing output
                # (otherwise, the user can check the .updated file)
                if not out and mpi.is_main_process():
                    logger_run.info(
                        "Input info updated with defaults (dumped to YAML):\n%s",
                        yaml_dump(sort_cosmetic(updated_info)))
            # 3. If output requested, check compatibility if existing one, and dump.
            # 3.1 First: model only
            out.check_and_dump_info(info,
                                    updated_info,
                                    cache_old=True,
                                    ignore_blocks=["sampler"])
            # 3.2 Then sampler -- 1st get the last sampler mentioned in the updated.yaml
            # TODO: ideally, using Minimizer would *append* to the sampler block.
            #       Some code already in place, but not possible at the moment.
            try:
                last_sampler = list(updated_info["sampler"])[-1]
                last_sampler_info = {
                    last_sampler: updated_info["sampler"][last_sampler]
                }
            except (KeyError, TypeError):
                raise LoggedError(logger_run, "No sampler requested.")
            sampler_name, sampler_class = get_sampler_name_and_class(
                last_sampler_info)
            check_sampler_info((out.reload_updated_info(use_cache=True)
                                or {}).get("sampler"),
                               updated_info["sampler"],
                               is_resuming=out.is_resuming())
            # Dump again, now including sampler info
            out.check_and_dump_info(info, updated_info, check_compatible=False)
            # Check if resumable run
            sampler_class.check_force_resume(
                out, info=updated_info["sampler"][sampler_name])
            # 4. Initialize the posterior and the sampler
            with Model(updated_info["params"],
                       updated_info["likelihood"],
                       updated_info.get("prior"),
                       updated_info.get("theory"),
                       packages_path=info.get("packages_path"),
                       timing=updated_info.get("timing"),
                       allow_renames=False,
                       stop_at_error=info.get("stop_at_error",
                                              False)) as model:
                # Re-dump the updated info, now containing parameter routes and version
                updated_info = recursive_update(updated_info, model.info())
                out.check_and_dump_info(None,
                                        updated_info,
                                        check_compatible=False)
                sampler = sampler_class(
                    updated_info["sampler"][sampler_name],
                    model,
                    out,
                    name=sampler_name,
                    packages_path=info.get("packages_path"))
                # Re-dump updated info, now also containing updates from the sampler
                updated_info["sampler"][sampler_name] = \
                    recursive_update(updated_info["sampler"][sampler_name],
                                     sampler.info())
                out.check_and_dump_info(None,
                                        updated_info,
                                        check_compatible=False)
                mpi.sync_processes()
                if info.get("test", False):
                    logger_run.info(
                        "Test initialization successful! "
                        "You can probably run now without `--%s`.", "test")
                    return InfoSamplerTuple(updated_info, sampler)
                # Run the sampler
                sampler.run()

    return InfoSamplerTuple(updated_info, sampler)
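
A minimal usage sketch for run() (hedged: the one-parameter Gaussian external
likelihood and all settings below are illustrative, not taken from the source above):

from cobaya.run import run

info = {
    "likelihood": {"gauss": lambda x: -0.5 * x ** 2},  # external log-likelihood
    "params": {"x": {"prior": {"min": -3, "max": 3}, "proposal": 0.5}},
    "sampler": {"mcmc": {"max_samples": 100}},
}
updated_info, sampler = run(info, output=False)  # output=False: no files written
products = sampler.products()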
Example no. 28
def run(info):
    assert hasattr(info, "keys"), (
        "The first argument must be a dictionary with the info needed for the run. "
        "If you were trying to pass the name of an input file instead, "
        "load it first with 'cobaya.input.load_input', "
        "or, if you were passing a yaml string, load it with 'cobaya.yaml.yaml_load'."
    )
    # Configure the logger ASAP
    # Just a dummy import before configuring the logger, until I fix root/individual level
    import getdist
    logger_setup(info.get(_debug), info.get(_debug_file))
    import logging
    # Initialize output, if required
    resume, force = info.get(_resume), info.get(_force)
    ignore_blocks = []
    # If minimizer, always try to re-use sample to get bestfit/covmat
    if list(info[_sampler])[0] == "minimize":
        resume = True
        force = False
    output = Output(output_prefix=info.get(_output_prefix),
                    resume=resume,
                    force_output=force)
    # Create the updated input information, including defaults for each module.
    updated_info = update_info(info)
    if output:
        updated_info[_output_prefix] = output.updated_output_prefix()
        updated_info[_resume] = output.is_resuming()
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Don't dump unless we are doing output, in case something is not serializable.
        # May be fixed in the future if we find a way to serialize external functions.
        if info.get(_output_prefix) and am_single_or_primary_process():
            logging.getLogger(__name__.split(".")[-1]).info(
                "Input info updated with defaults (dumped to YAML):\n%s",
                yaml_dump(updated_info))
    # TO BE DEPRECATED IN >1.2!!! #####################
    _force_reproducible = "force_reproducible"
    if _force_reproducible in info:
        info.pop(_force_reproducible)
        logging.getLogger(__name__.split(".")[-1]).warning(
            "Option '%s' is no longer necessary. Please remove it!" %
            _force_reproducible)
    # CHECK THAT THIS WARNING WORKS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    ###################################################
    # We dump the info now, before modules initialization, to better track errors and
    # to check asap whether resuming is possible (i.e. old and new infos are consistent)
    output.dump_info(info, updated_info)
    # Initialize the posterior and the sampler
    with Model(updated_info[_params],
               updated_info[_likelihood],
               updated_info.get(_prior),
               updated_info.get(_theory),
               modules=info.get(_path_install),
               timing=updated_info.get(_timing),
               allow_renames=False) as model:
        # Update the updated info with the parameter routes
        keys = ([_likelihood, _theory]
                if _theory in updated_info else [_likelihood])
        updated_info.update(odict([[k, model.info()[k]] for k in keys]))
        output.dump_info(None, updated_info, check_compatible=False)
        with Sampler(updated_info[_sampler],
                     model,
                     output,
                     resume=updated_info.get(_resume),
                     modules=info.get(_path_install)) as sampler:
            sampler.run()
    # For scripted calls:
    # Restore the original output_prefix: the script has not changed folder!
    if _output_prefix in info:
        updated_info[_output_prefix] = info.get(_output_prefix)
    return updated_info, sampler.products()
Example no. 29
def post(info_or_yaml_or_file: Union[InputDict, str, os.PathLike],
         sample: Union[SampleCollection, List[SampleCollection], None] = None
         ) -> PostTuple:
    info = load_input_dict(info_or_yaml_or_file)
    logger_setup(info.get("debug"), info.get("debug_file"))
    log = get_logger(__name__)
    # MARKED FOR DEPRECATION IN v3.0
    if info.get("modules"):
        raise LoggedError(log, "The input field 'modules' has been deprecated. "
                               "Please use %r instead.", packages_path_input)
    # END OF DEPRECATION BLOCK
    info_post: PostDict = info.get("post") or {}
    if not info_post:
        raise LoggedError(log, "No 'post' block given. Nothing to do!")
    if mpi.is_main_process() and info.get("resume"):
        log.warning("Resuming not implemented for post-processing. Re-starting.")
    if not info.get("output") and info_post.get("output") \
            and not info.get("params"):
        raise LoggedError(log, "The input dictionary must either be a full options "
                               "dictionary, or have an existing 'output' root from "
                               "which to load previous settings (the 'output' to read "
                               "from goes in the main block, not under 'post').")
    # 1. Load existing sample
    output_in = get_output(prefix=info.get("output"))
    if output_in:
        info_in = output_in.load_updated_info() or update_info(info)
    else:
        info_in = update_info(info)
    params_in: ExpandedParamsDict = info_in["params"]  # type: ignore
    dummy_model_in = DummyModel(params_in, info_in.get("likelihood", {}),
                                info_in.get("prior"))

    in_collections = []
    thin = info_post.get("thin", 1)
    skip = info_post.get("skip", 0)
    if info.get('thin') is not None or info.get('skip') is not None:  # type: ignore
        raise LoggedError(log, "'thin' and 'skip' should be "
                               "parameters of the 'post' block")

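    # Illustrative (hedged) shape of the expected input, per the checks above:
    #   {"output": "chains/run1",           # existing chain to post-process
    #    "post": {"suffix": "cut",          # required when writing output
    #             "skip": 0.3, "thin": 5,   # burn-in fraction / thinning factor
    #             "add": {...}, "remove": {...}}}
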
    if sample:
        # If running with MPI, assume that each MPI process is passed the list of
        # collections that it should process
        # (e.g. the single-chain output from the sampler)
        if isinstance(sample, SampleCollection):
            in_collections = [sample]
        else:
            in_collections = sample
        for i, collection in enumerate(in_collections):
            if skip:
                if 0 < skip < 1:
                    skip = int(round(skip * len(collection)))
                collection = collection.filtered_copy(slice(skip, None))
            if thin != 1:
                collection = collection.thin_samples(thin)
            in_collections[i] = collection
    elif output_in:
        files = output_in.find_collections()
        numbered = files
        if not numbered:
            # look for un-numbered output files
            files = output_in.find_collections(name=False)
        if files:
            if mpi.size() > len(files):
                raise LoggedError(log, "Number of MPI processes (%s) is larger than "
                                       "the number of sample files (%s)",
                                  mpi.size(), len(files))
            for num in range(mpi.rank(), len(files), mpi.size()):
                in_collections += [SampleCollection(
                    dummy_model_in, output_in,
                    onload_thin=thin, onload_skip=skip, load=True, file_name=files[num],
                    name=str(num + 1) if numbered else "")]
        else:
            raise LoggedError(log, "No samples found for the input model with prefix %s",
                              os.path.join(output_in.folder, output_in.prefix))

    else:
        raise LoggedError(log, "No output from where to load from, "
                               "nor input collections given.")
    if any(len(c) <= 1 for c in in_collections):
        raise LoggedError(
            log, "Not enough samples for post-processing. Try using a larger sample, "
                 "or skipping or thinning less.")
    mpi.sync_processes()
    log.info("Will process %d sample points.", sum(len(c) for c in in_collections))

    # 2. Compare old and new info: determine what to do
    add = info_post.get("add") or {}
    if "remove" in add:
        raise LoggedError(log, "remove block should be under 'post', not 'add'")
    remove = info_post.get("remove") or {}
    # Add a dummy 'one' likelihood, to absorb unused parameters
    if not add.get("likelihood"):
        add["likelihood"] = {}
    add["likelihood"]["one"] = None
    # Expand the "add" info, but don't add new default sampled parameters
    orig_params = set(add.get("params") or [])
    add = update_info(add, add_aggr_chi2=False)
    add_params: ExpandedParamsDict = add["params"]  # type: ignore
    for p in set(add_params) - orig_params:
        if p in params_in:
            add_params.pop(p)

    # 2.1 Adding/removing derived parameters and changes in priors of sampled parameters
    out_combined_params = deepcopy_where_possible(params_in)
    remove_params = list(str_to_list(remove.get("params")) or [])
    for p in remove_params:
        pinfo = params_in.get(p)
        if pinfo is None or not is_derived_param(pinfo):
            raise LoggedError(
                log,
                "You tried to remove parameter '%s', which is not a derived parameter. "
                "Only derived parameters can be removed during post-processing.", p)
        out_combined_params.pop(p)
    # Force recomputation of aggregated chi2
    for p in list(out_combined_params):
        if p.startswith(get_chi2_name("")):
            out_combined_params.pop(p)
    prior_recompute_1d = False
    for p, pinfo in add_params.items():
        pinfo_in = params_in.get(p)
        if is_sampled_param(pinfo):
            if not is_sampled_param(pinfo_in):
                # No added sampled parameters (de-marginalisation not implemented)
                if pinfo_in is None:
                    raise LoggedError(
                        log, "You added a new sampled parameter %r (maybe accidentally "
                             "by adding a new likelihood that depends on it). "
                             "Adding new sampled parameters is not possible. Try fixing "
                             "it to some value.", p)
                else:
                    raise LoggedError(
                        log,
                        "You tried to change the prior of parameter '%s', "
                        "but it was not a sampled parameter. "
                        "To change that prior, you need to define it as an external "
                        "one.", p)
            # Recompute the prior, since sampled-parameter priors may have changed
            prior_recompute_1d = True
        elif is_derived_param(pinfo):
            if p in out_combined_params:
                raise LoggedError(
                    log, "You tried to add derived parameter '%s', which is already "
                         "present. To force its recomputation, 'remove' it too.", p)
        elif is_fixed_or_function_param(pinfo):
            # Only one possibility left: a "fixed" parameter that was not present
            # before, i.e. input for a new likelihood, or just an argument for a
            # dynamical derived parameter (dropped)
            if pinfo_in and p in params_in and pinfo["value"] != pinfo_in.get("value"):
                raise LoggedError(
                    log,
                    "You tried to add a fixed parameter '%s: %r' that was already present"
                    " but had a different value or was not fixed. This is not allowed. "
                    "The old info of the parameter was '%s: %r'",
                    p, dict(pinfo), p, dict(pinfo_in))
        elif not pinfo_in:  # OK as long as we have a known value for it
            raise LoggedError(log, "Parameter %s has no known value.", p)
        out_combined_params[p] = pinfo

    out_combined: InputDict = {"params": out_combined_params}  # type: ignore
    # Turn the rest of the *derived* parameters into constants,
    # so that the likelihoods do not try to recompute them.
    # But be careful to exclude *input* params that have a "derived: True" value
    # (which in "updated info" turns into "derived: 'lambda [x]: [x]'").
    # Don't assign derived parameters to theories, only to likelihoods, so that they
    # can be recomputed if needed. If the theory does not need to be computed, it
    # doesn't matter if it is already assigned parameters in the usual way; likelihoods
    # can get the required derived parameters from the stored sample derived inputs.
    out_params_with_computed = deepcopy_where_possible(out_combined_params)

    dropped_theory = set()
    for p, pinfo in out_params_with_computed.items():
        if (is_derived_param(pinfo) and "value" not in pinfo
                and p not in add_params):
            out_params_with_computed[p] = {"value": np.nan}
            dropped_theory.add(p)
    # 2.2 Manage adding/removing priors and likelihoods
    warn_remove = False
    kind: ModelBlock
    for kind in ("prior", "likelihood", "theory"):
        out_combined[kind] = deepcopy_where_possible(info_in.get(kind)) or {}
        for remove_item in str_to_list(remove.get(kind)) or []:
            try:
                out_combined[kind].pop(remove_item, None)
                if remove_item not in (add.get(kind) or []) and kind != "theory":
                    warn_remove = True
            except ValueError:
                raise LoggedError(
                    log, "Trying to remove %s '%s', but it is not present. "
                         "Existing ones: %r", kind, remove_item, list(out_combined[kind]))
        if kind != "theory" and kind in add:
            dups = set(add.get(kind) or []).intersection(out_combined[kind]) - {"one"}
            if dups:
                raise LoggedError(
                    log, "You have added %s '%s', which was already present. If you "
                         "want to force its recomputation, you must also 'remove' it.",
                    kind, dups)
            out_combined[kind].update(add[kind])

    if warn_remove and mpi.is_main_process():
        log.warning("You are removing a prior or likelihood pdf. "
                    "Notice that if the resulting posterior is much wider "
                    "than the original one, or displaced enough, "
                    "it is probably safer to explore it directly.")

    mlprior_names_add = minuslogprior_names(add.get("prior") or [])
    chi2_names_add = [get_chi2_name(name) for name in add["likelihood"] if
                      name != "one"]
    out_combined["likelihood"].pop("one", None)

    add_theory = add.get("theory")
    if add_theory:
        if len(add["likelihood"]) == 1 and not any(
                is_derived_param(pinfo) for pinfo in add_params.values()):
            log.warning("You are adding a theory, but this does not force recomputation "
                        "of any likelihood or derived parameters unless explicitly "
                        "removed+added.")
        # Inherit from the original chain (input|output_params, renames, etc)
        added_theory = add_theory.copy()
        for theory, theory_info in out_combined["theory"].items():
            if theory in list(added_theory):
                out_combined["theory"][theory] = \
                    recursive_update(theory_info, added_theory.pop(theory))
        out_combined["theory"].update(added_theory)

    # Prepare recomputation of aggregated chi2
    # (they need to be recomputed by hand, because auto-computation won't pick up
    #  old likelihoods for a given type)
    all_types = {like: str_to_list(opts.get("type") or [])
                 for like, opts in out_combined["likelihood"].items()}
    types = set(chain(*all_types.values()))
    inv_types = {t: [like for like, like_types in all_types.items() if t in like_types]
                 for t in sorted(types)}
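    # Hedged illustration: with all_types == {"bao": ["BAO"], "planck": ["CMB"]},
    # types == {"BAO", "CMB"} and inv_types == {"BAO": ["bao"], "CMB": ["planck"]};
    # the call below then adds one aggregated chi2__<type> column per type.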
    add_aggregated_chi2_params(out_combined_params, types)

    # 3. Create output collection
    # Use the default prefix if it exists; if it does not, produce no output by default.
    # {post: {output: None}} suppresses output; if it is a string, it replaces the prefix.
    out_prefix = info_post.get("output", info.get("output"))
    if out_prefix:
        suffix = info_post.get("suffix")
        if not suffix:
            raise LoggedError(log, "You need to provide a '%s' for your output chains.",
                              "suffix")
        out_prefix += separator_files + "post" + separator_files + suffix
    output_out = get_output(prefix=out_prefix, force=info.get("force"))
    output_out.set_lock()

    if output_out and not output_out.force and output_out.find_collections():
        raise LoggedError(log, "Found existing post-processing output with prefix %r. "
                               "Delete it manually or re-run with `force: True` "
                               "(or `-f`, `--force` from the shell).", out_prefix)
    elif output_out and output_out.force and mpi.is_main_process():
        output_out.delete_infos()
        for _file in output_out.find_collections():
            output_out.delete_file_or_folder(_file)
    info_out = deepcopy_where_possible(info)
    info_post = info_post.copy()
    info_out["post"] = info_post
    # Updated with input info and extended (updated) add info
    info_out.update(info_in)  # type: ignore
    info_post["add"] = add

    dummy_model_out = DummyModel(out_combined_params, out_combined["likelihood"],
                                 info_prior=out_combined["prior"])
    out_func_parameterization = Parameterization(out_params_with_computed)

    # TODO: check allow_renames=False?
    model_add = Model(out_params_with_computed, add["likelihood"],
                      info_prior=add.get("prior"), info_theory=out_combined["theory"],
                      packages_path=(info_post.get(packages_path_input) or
                                     info.get(packages_path_input)),
                      allow_renames=False, post=True,
                      stop_at_error=info.get('stop_at_error', False),
                      skip_unused_theories=True, dropped_theory_params=dropped_theory)
    # Remove auxiliary "one" before dumping -- 'add' *is* info_out["post"]["add"]
    add["likelihood"].pop("one")
    out_collections = [SampleCollection(dummy_model_out, output_out, name=c.name,
                                        cache_size=OutputOptions.default_post_cache_size)
                       for c in in_collections]
    # TODO: should maybe add skip/thin to out_combined, so can tell post-processed?
    output_out.check_and_dump_info(info_out, out_combined, check_compatible=False)
    collection_in = in_collections[0]
    collection_out = out_collections[0]

    last_percent = None
    known_constants = dummy_model_out.parameterization.constant_params()
    known_constants.update(dummy_model_in.parameterization.constant_params())
    missing_params = dummy_model_in.parameterization.sampled_params().keys() - set(
        collection_in.columns)
    if missing_params:
        raise LoggedError(log, "Input samples do not contain expected sampled parameter "
                               "values: %s", missing_params)

    missing_priors = set(name for name in collection_out.minuslogprior_names if
                         name not in mlprior_names_add
                         and name not in collection_in.columns)
    if _minuslogprior_1d_name in missing_priors:
        prior_recompute_1d = True
    if prior_recompute_1d:
        missing_priors.discard(_minuslogprior_1d_name)
        mlprior_names_add.insert(0, _minuslogprior_1d_name)
    prior_regenerate: Optional[Prior]
    if missing_priors and "prior" in info_in:
        # in case there are input priors that are not stored in input samples
        # e.g. when postprocessing GetDist/CosmoMC-format chains
        in_names = minuslogprior_names(info_in["prior"])
        info_prior = {piname: inf for (piname, inf), in_name in
                      zip(info_in["prior"].items(), in_names) if
                      in_name in missing_priors}
        regenerated_prior_names = minuslogprior_names(info_prior)
        missing_priors.difference_update(regenerated_prior_names)
        prior_regenerate = Prior(dummy_model_in.parameterization, info_prior)
    else:
        prior_regenerate = None
        regenerated_prior_names = None
    if missing_priors:
        raise LoggedError(log, "Missing priors: %s", missing_priors)

    mpi.sync_processes()
    output_in.check_lock()

    # 4. Main loop! Loop over input samples and adjust as required.
    if mpi.is_main_process():
        log.info("Running post-processing...")
    difflogmax: Optional[float] = None
    to_do = sum(len(c) for c in in_collections)
    weights = []
    done = 0
    last_dump_time = time.time()
    for collection_in, collection_out in zip(in_collections, out_collections):
        importance_weights = []

        def set_difflogmax():
            nonlocal difflogmax
            difflog = (collection_in[OutPar.minuslogpost].to_numpy(
                dtype=np.float64)[:len(collection_out)]
                       - collection_out[OutPar.minuslogpost].to_numpy(dtype=np.float64))
            difflogmax = np.max(difflog)
            if abs(difflogmax) < 1:
                difflogmax = 0  # keep simple when e.g. very similar
            log.debug("difflogmax: %g", difflogmax)
            if mpi.more_than_one_process():
                difflogmax = max(mpi.allgather(difflogmax))
            if mpi.is_main_process():
                log.debug("Set difflogmax: %g", difflogmax)
            _weights = np.exp(difflog - difflogmax)
            importance_weights.extend(_weights)
            collection_out.reweight(_weights)
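
        # Hedged note: difflog = minuslogpost_old - minuslogpost_new
        # = logpost_new - logpost_old, so these are standard importance weights,
        # w = exp(logpost_new - logpost_old - difflogmax), with difflogmax a common
        # offset (shared across MPI processes) chosen for numerical stability.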

        for i, point in collection_in.data.iterrows():
            all_params = point.to_dict()
            for p in remove_params:
                all_params.pop(p, None)
            log.debug("Point: %r", point)
            sampled = np.array([all_params[param] for param in
                                dummy_model_in.parameterization.sampled_params()])
            all_params = out_func_parameterization.to_input(all_params).copy()

            # Add/remove priors
            if prior_recompute_1d:
                priors_add = [model_add.prior.logps_internal(sampled)]
                if priors_add[0] == -np.inf:
                    continue
            else:
                priors_add = []
            if model_add.prior.external:
                priors_add.extend(model_add.prior.logps_external(all_params))

            logpriors_add = dict(zip(mlprior_names_add, priors_add))
            logpriors_new = [logpriors_add.get(name, - point.get(name, 0))
                             for name in collection_out.minuslogprior_names]
            if prior_regenerate:
                regenerated = dict(zip(regenerated_prior_names,
                                       prior_regenerate.logps_external(all_params)))
                for _i, name in enumerate(collection_out.minuslogprior_names):
                    if name in regenerated_prior_names:
                        logpriors_new[_i] = regenerated[name]

            if is_debug(log):
                log.debug("New set of priors: %r",
                          dict(zip(dummy_model_out.prior, logpriors_new)))
            if -np.inf in logpriors_new:
                continue
            # Add/remove likelihoods and/or (re-)calculate derived parameters
            loglikes_add, output_derived = model_add._loglikes_input_params(
                all_params, return_output_params=True)
            loglikes_add = dict(zip(chi2_names_add, loglikes_add))
            output_derived = dict(zip(model_add.output_params, output_derived))
            loglikes_new = [loglikes_add.get(name, -0.5 * point.get(name, 0))
                            for name in collection_out.chi2_names]
            if is_debug(log):
                log.debug("New set of likelihoods: %r",
                          dict(zip(dummy_model_out.likelihood, loglikes_new)))
                if output_derived:
                    log.debug("New set of derived parameters: %r", output_derived)
            if -np.inf in loglikes_new:
                continue
            all_params.update(output_derived)

            all_params.update(out_func_parameterization.to_derived(all_params))
            derived = {param: all_params.get(param) for param in
                       dummy_model_out.parameterization.derived_params()}
            # We need to recompute the aggregated chi2 by hand
            for type_, likes in inv_types.items():
                derived[get_chi2_name(type_)] = sum(
                    -2 * lvalue for lname, lvalue
                    in zip(collection_out.chi2_names, loglikes_new)
                    if undo_chi2_name(lname) in likes)
            if is_debug(log):
                log.debug("New derived parameters: %r",
                          {p: derived[p]
                           for p in dummy_model_out.parameterization.derived_params()
                           if p in add["params"]})
            # Save to the collection (keep old weight for now)
            weight = point.get(OutPar.weight)
            mpi.check_errors()
            if difflogmax is None and i > OutputOptions.reweight_after and \
                    time.time() - last_dump_time > OutputOptions.output_inteveral_s / 2:
                set_difflogmax()
                collection_out.out_update()

            if difflogmax is not None:
                logpost_new = sum(logpriors_new) + sum(loglikes_new)
                importance_weight = np.exp(logpost_new + point.get(OutPar.minuslogpost)
                                           - difflogmax)
                weight = weight * importance_weight
                importance_weights.append(importance_weight)
                if time.time() - last_dump_time > OutputOptions.output_inteveral_s:
                    collection_out.out_update()
                    last_dump_time = time.time()

            if weight > 0:
                collection_out.add(sampled, derived=derived.values(), weight=weight,
                                   logpriors=logpriors_new, loglikes=loglikes_new)

            # Display progress
            percent = int(np.round((i + done) / to_do * 100))
            if percent != last_percent and not percent % 5:
                last_percent = percent
                progress_bar(log, percent, " (%d/%d)" % (i + done, to_do))

        if difflogmax is None:
            set_difflogmax()
        if not collection_out.data.last_valid_index():
            raise LoggedError(
                log, "No elements in the final sample. Possible causes: an added prior "
                     "or likelihood that is zero over the full sampled domain, or the "
                     "theory computation failing everywhere.")
        collection_out.out_update()
        weights.append(np.array(importance_weights))
        done += len(collection_in)

    assert difflogmax is not None
    points = 0
    tot_weight = 0
    min_weight = np.inf
    max_weight = -np.inf
    max_output_weight = -np.inf
    sum_w2 = 0
    points_removed = 0
    for collection_in, collection_out, importance_weights in zip(in_collections,
                                                                 out_collections,
                                                                 weights):
        output_weights = collection_out[OutPar.weight]
        points += len(collection_out)
        tot_weight += np.sum(output_weights)
        points_removed += len(importance_weights) - len(output_weights)
        min_weight = min(min_weight, np.min(importance_weights))
        max_weight = max(max_weight, np.max(importance_weights))
        max_output_weight = max(max_output_weight, np.max(output_weights))
        sum_w2 += np.dot(output_weights, output_weights)

    (tot_weights, min_weights, max_weights, max_output_weights, sum_w2s, points_s,
     points_removed_s) = mpi.zip_gather(
        [tot_weight, min_weight, max_weight, max_output_weight, sum_w2,
         points, points_removed])

    if mpi.is_main_process():
        output_out.clear_lock()
        log.info("Finished! Final number of distinct sample points: %s", sum(points_s))
        log.info("Importance weight range: %.4g -- %.4g",
                 min(min_weights), max(max_weights))
        if sum(points_removed_s):
            log.info("Points deleted due to zero weight: %s", sum(points_removed_s))
        log.info("Effective number of single samples if independent (sum w)/max(w): %s",
                 int(sum(tot_weights) / max(max_output_weights)))
        log.info(
            "Effective number of weighted samples if independent (sum w)^2/sum(w^2): "
            "%s", int(sum(tot_weights) ** 2 / sum(sum_w2s)))
    products: PostResultDict = {"sample": value_or_list(out_collections),
                                "stats": {'min_importance_weight': (min(min_weights) /
                                                                    max(max_weights)),
                                          'points_removed': sum(points_removed_s),
                                          'tot_weight': sum(tot_weights),
                                          'max_weight': max(max_output_weights),
                                          'sum_w2': sum(sum_w2s),
                                          'points': sum(points_s)},
                                "logpost_weight_offset": difflogmax,
                                "weights": value_or_list(weights)}
    return PostTuple(info=out_combined, products=products)
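
A hedged usage sketch for post() (the chain prefix and the added external likelihood
are illustrative; given the suffix logic above, output would be written under
chains/run1.post.tilt.*, assuming '.' as the file separator):

from cobaya.post import post

post_info = {
    "output": "chains/run1",  # prefix of an existing, finished run (hypothetical)
    "post": {
        "suffix": "tilt",     # required when writing output
        "skip": 0.3,          # discard the first 30% of each chain as burn-in
        "add": {"likelihood": {"tilt": lambda x: -0.5 * (x - 1) ** 2}},
    },
}
updated_info, products = post(post_info)
print(products["stats"])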
Example no. 30
def install_script(args=None):
    """Command line script for the installer."""
    set_mpi_disabled()
    warn_deprecation()
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser(
        prog="cobaya install",
        description="Cobaya's installation tool for external packages.")
    parser.add_argument(
        "files_or_components",
        action="store",
        nargs="+",
        metavar="input_file.yaml|component_name",
        help="One or more input files or component names "
        "(or simply 'cosmo' to install all the requisites for basic"
        " cosmological runs)")
    parser.add_argument(
        "-" + packages_path_arg[0],
        "--" + packages_path_arg_posix,
        action="store",
        required=False,
        metavar="/packages/path",
        default=None,
        help="Desired path where to install external packages. "
        "Optional if one has been set globally or as an env variable"
        " (run with '--show_%s' to check)." % packages_path_arg_posix)
    output_show_packages_path = resolve_packages_path()
    if output_show_packages_path and os.environ.get(packages_path_env):
        output_show_packages_path += " (from env variable %r)" % packages_path_env
    elif output_show_packages_path:
        output_show_packages_path += " (from config file)"
    else:
        output_show_packages_path = "(Not currently set.)"
    parser.add_argument(
        "--show-" + packages_path_arg_posix,
        action="version",
        version=output_show_packages_path,
        help="Prints default external packages installation folder "
        "and exits.")
    parser.add_argument(
        "-" + "f",
        "--" + "force",
        action="store_true",
        default=False,
        help="Force re-installation of apparently installed packages.")
    parser.add_argument("--%s" % "test",
                        action="store_true",
                        default=False,
                        help="Just check whether components are installed.")
    parser.add_argument("--upgrade",
                        action="store_true",
                        default=False,
                        help="Force upgrade of obsolete components.")
    parser.add_argument("--skip",
                        action="store",
                        nargs="*",
                        metavar="keyword",
                        help=("Keywords of components that will be "
                              "skipped during installation."))
    parser.add_argument(
        "--skip-global",
        action="store_true",
        default=False,
        help="Skip installation of already-available Python modules.")
    parser.add_argument("-" + "d",
                        "--" + "debug",
                        action="store_true",
                        help="Produce verbose debug output.")
    group_just = parser.add_mutually_exclusive_group(required=False)
    group_just.add_argument("-C",
                            "--just-code",
                            action="store_false",
                            default=True,
                            help="Install code of the components.",
                            dest=data_path)
    group_just.add_argument("-D",
                            "--just-data",
                            action="store_false",
                            default=True,
                            help="Install data of the components.",
                            dest=code_path)
    parser.add_argument(
        "--no-progress-bars",
        action="store_true",
        default=False,
        help=("No progress bars shown; use when output is saved into a "
              "text file (e.g. when running on a cluster)."))
    parser.add_argument(
        "--no-set-global",
        action="store_true",
        default=False,
        help="Do not store the installation path for later runs.")
    arguments = parser.parse_args(args)
    # Configure the logger ASAP
    logger_setup(arguments.debug)
    logger = get_logger("install")
    # Gather requests
    infos: List[Union[InputDict, str]] = []
    for f in arguments.files_or_components:
        if f.lower() == "cosmo":
            logger.info("Installing basic cosmological packages.")
            from cobaya.cosmo_input import install_basic
            infos += [install_basic]
        elif f.lower() == "cosmo-tests":
            logger.info("Installing *tested* cosmological packages.")
            from cobaya.cosmo_input import install_tests
            infos += [install_tests]
        elif os.path.splitext(f)[1].lower() in Extension.yamls:
            from cobaya.input import load_input
            infos += [load_input(f)]
        else:  # a single component name, no kind specified
            infos += [f]
    # Launch installer
    install(*infos,
            path=getattr(arguments, packages_path_arg),
            logger=logger,
            **{
                arg: getattr(arguments, arg)
                for arg in [
                    "force", code_path, data_path, "no_progress_bars", "test",
                    "no_set_global", "skip", "skip_global", "debug", "upgrade"
                ]
            })
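
A hedged usage sketch: install_script() parses an argv-style list, so the
programmatic call below mirrors the CLI call in the comment (the packages path is
hypothetical, and the short packages-path flag is assumed to resolve to -p):

from cobaya.install import install_script

# Same as: cobaya install cosmo -p /tmp/cobaya_packages --no-progress-bars
install_script(["cosmo", "-p", "/tmp/cobaya_packages", "--no-progress-bars"])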