Example #1
0
    def get_defaults(cls, return_yaml=False, yaml_expand_defaults=True):
        """
        Return defaults for this module, with syntax:

        .. code::
           [kind]
             [module_name]:
               option: value
               [...]

           params:
             [...]  # if required

           prior:
             [...]  # if required

        If keyword `return_yaml` is set to True, it returns literally that,
        whereas if False (default), it returns the corresponding Python dict.
        """
        defaults_path = cls.get_yaml_file()
        # Dict requested: just load and return it
        if not return_yaml:
            return yaml_load_file(defaults_path)
        # YAML requested: either re-dump the loaded (expanded) info...
        if yaml_expand_defaults:
            return yaml_dump(yaml_load_file(defaults_path))
        # ...or return the raw file contents verbatim
        with open(defaults_path, "r") as defaults_file:
            return defaults_file.read()
Example #2
0
 def reload_updated_info(self,
                         cache=False,
                         use_cache=False) -> Optional[InputDict]:
     """
     Reload the updated info of a previous run from disk.

     Only the main MPI process reads from disk; non-main processes may only
     request the cached copy (``use_cache=True``), otherwise an error is raised.

     :param cache: if True, store the loaded info in ``self._old_updated_info``.
     :param use_cache: if True, return the previously cached info when present.
     :return: the loaded info dict, or ``None`` if the file could not be read.
     """
     if mpi.is_main_process():
         if use_cache and hasattr(self, "_old_updated_info"):
             return self._old_updated_info
         try:
             # Prefer the dump file if it exists; otherwise fall back to yaml
             if os.path.isfile(self.dump_file_updated):
                 loaded = load_info_dump(self.dump_file_updated)
             else:
                 loaded = yaml_load_file(self.file_updated)  # type: ignore
             if cache:
                 # Cache a copy so later mutations of `loaded` don't leak in
                 self._old_updated_info = deepcopy_where_possible(loaded)
             return loaded
         except IOError:
             # No previous run to reload from; cache the negative result too
             if cache:
                 self._old_updated_info = None
             return None
     else:
         # Only cached possible when non main process
         if not use_cache:
             raise LoggedError(
                 self.log, "Cannot call `reload_updated_info` from "
                 "non-main process unless cached version "
                 "(`use_cache=True`) requested.")
         return getattr(self, "_old_updated_info", None)
Example #3
0
    def dump_info(self, input_info, full_info):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the modules' defaults.

        If resuming a sample, checks first that old and new infos are consistent.

        :param input_info: info dict as given by the user.
        :param full_info: info dict populated with the modules' defaults.
        :raises HandledException: if resuming with incompatible info, or if
            the info cannot be serialized to yaml.
        """
        # trim known params of each likelihood: for internal use only
        full_info_trimmed = deepcopy(full_info)
        for lik_info in full_info_trimmed.get(_likelihood, {}).values():
            if hasattr(lik_info, "pop"):
                lik_info.pop(_params, None)
        try:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check if python objects are the same as before.
            old_info = yaml_load_file(self.file_full)
            new_info = yaml_load(yaml_dump(full_info_trimmed))
            if not is_equal_info(old_info, new_info, strict=False):
                self.log.error(
                    "Old and new sample information not compatible! "
                    "Resuming not possible!")
                raise HandledException
        except IOError:
            # There was no previous chain
            pass
        # We write the new one anyway (maybe updated debug, resuming...)
        for f, info in [(self.file_input, input_info),
                        (self.file_full, full_info_trimmed)]:
            with open(f, "w") as f_out:
                try:
                    f_out.write(yaml_dump(info))
                except OutputError as e:
                    self.log.error(e.message)
                    raise HandledException
Example #4
0
def load_input(input_file: str) -> InputDict:
    """
    Loads general info, and splits it into the right parts.
    """
    base_name, extension = os.path.splitext(input_file)
    base_name = os.path.basename(base_name)
    info: InputDict
    if extension.lower() in Extension.yamls:
        info = yaml_load_file(input_file) or {}  # type: ignore
    elif extension == Extension.dill:
        info = load_info_dump(input_file) or {}
    else:
        raise LoggedError(log, "Extension of input file '%s' not recognized.",
                          input_file)

    # Default the output prefix to the input file name (sans extension)
    if "output" not in info:
        info["output"] = base_name
    elif info["output"] is None:
        # Shell-invocation mode: warn that output was explicitly disabled
        log.warning("WARNING: Output explicitly suppressed with '%s: null'",
                    "output")
    # Inside a container, redirect relative output paths to the products dir
    if "CONTAINED" in os.environ:
        for key in ("output", "debug_file"):
            value = info.get(key)
            if value and not value.startswith("/"):
                info[key] = os.path.join(products_path, value)
    return info
def test_example(tmpdir):
    """
    Runs the quickstart doc scripts "as is" (with a fixed seed) and checks:
    - consistency between the interactive and yaml-loaded infos,
    - that the analysis text output matches the stored reference,
    - that the produced plot matches the stored reference pixel-wise
      (within `pixel_tolerance`).
    """
    # temporarily change working directory to be able to run the files "as is"
    cwd = os.getcwd()
    os.chdir(docs_src_folder)
    info_yaml = yaml_load_file("gaussian.yaml")
    info_yaml.pop(_output_prefix)
    globals_example = {}
    exec(
        open(os.path.join(docs_src_folder, "create_info.py")).read(),
        globals_example)
    try:
        assert is_equal_info(info_yaml, globals_example["info"]), (
            "Inconsistent info between yaml and interactive.")
        exec(
            open(os.path.join(docs_src_folder, "load_info.py")).read(),
            globals_example)
        globals_example["info_from_yaml"].pop(_output_prefix)
        assert is_equal_info(info_yaml, globals_example["info_from_yaml"]), (
            "Inconsistent info between interactive and *loaded* yaml.")
        # Run the chain -- constant seed so results are the same!
        globals_example["info"]["sampler"]["mcmc"] = (
            globals_example["info"]["sampler"]["mcmc"] or {})
        globals_example["info"]["sampler"]["mcmc"].update({"seed": 0})
        exec(
            open(os.path.join(docs_src_folder, "run.py")).read(),
            globals_example)
        # Analyze and plot -- capture print output
        stream = StringIO()
        with stdout_redirector(stream):
            exec(
                open(os.path.join(docs_src_folder, "analyze.py")).read(),
                globals_example)
        # Comparing text output
        out_filename = "analyze_out.txt"
        with open(os.path.join(docs_src_folder, out_filename)) as out_file:
            contents = out_file.read()
        # The endswith guarantees that getdist messages and warnings are ignored
        assert stream.getvalue().replace("\n", "").replace(" ", "").endswith(
            contents.replace("\n", "").replace(" ", "")), (
                "Text output does not coincide:\nwas\n%s\nand " % contents +
                "now it's\n%s" % stream.getvalue())
        # Comparing plot
        plot_filename = "example_quickstart_plot.png"
        test_filename = tmpdir.join(plot_filename)
        globals_example["gdplot"].export(str(test_filename))
        print("Plot created at '%s'" % str(test_filename))
        test_img = imread(str(test_filename)).astype(float)
        docs_img = imread(os.path.join(docs_img_folder,
                                       plot_filename)).astype(float)
        npixels = test_img.shape[0] * test_img.shape[1]
        # Fraction of identical values over the 4 channels must pass tolerance
        assert (
            np.count_nonzero(test_img == docs_img) /
            (4 * npixels) >= pixel_tolerance), (
                "Images are too different. Maybe GetDist conventions changed?")
    finally:
        # Back to the working directory of the tests, just in case, and restart the rng
        os.chdir(cwd)
Example #6
0
def test_run_file(tmpdir):
    """Run a chain from an input file and check the updated-info prior."""
    pars_path = os.path.join(tmpdir, 'pars.yaml')
    output_root = os.path.join(tmpdir, 'test')
    yaml_dump_file(pars_path, dict(test_info_common, output=output_root))
    run_script([pars_path, '--force'])
    lik_name = next(iter(test_info_common["likelihood"]))
    defaults = get_default_info(lik_name, "likelihood")
    updated = yaml_load_file(output_root + '.updated.yaml')
    assert updated["prior"] == defaults["prior"]
Example #7
0
    def __init__(self,
                 info_sampler,
                 model,
                 output,
                 resume=_resume_default,
                 modules=None):
        """
        Actual initialization of the class. Loads the default and input information and
        call the custom ``initialize`` method.

        [Do not modify this one.]

        :param info_sampler: dict of options for this sampler; every key is
            set as an attribute on the instance.
        :param model: the model to sample from.
        :param output: the output driver for this run.
        :param resume: whether to try to resume from a previous checkpoint.
        :param modules: path to external modules (stored as ``path_install``).
        """
        self.name = self.__class__.__name__
        self.set_logger()
        self.model = model
        self.output = output
        self.path_install = modules
        # Load info of the sampler: every option becomes an instance attribute
        for k in info_sampler:
            setattr(self, k, info_sampler[k])
        # Seed, if requested
        if getattr(self, "seed", None) is not None:
            self.log.warning("This run has been SEEDED with seed %d",
                             self.seed)
            try:
                np.random.seed(self.seed)
            except TypeError:
                raise LoggedError(
                    self.log,
                    "Seeds must be *integer*, but got %r with type %r",
                    self.seed, type(self.seed))
        # Load checkpoint info, if resuming (never for the minimizer)
        self.resuming = resume
        if self.resuming and self.name != "minimize":
            try:
                checkpoint_info = yaml_load_file(self.checkpoint_filename())
                try:
                    # Restore the sampler state saved in the checkpoint file
                    for k, v in checkpoint_info[_sampler][self.name].items():
                        setattr(self, k, v)
                    self.resuming = True
                    if am_single_or_primary_process():
                        self.log.info("Resuming from previous sample!")
                except KeyError:
                    # Checkpoint exists but belongs to another sampler
                    if am_single_or_primary_process():
                        raise LoggedError(
                            self.log, "Checkpoint file found at '%s' "
                            "but it corresponds to a different sampler.",
                            self.checkpoint_filename())
            except (IOError, TypeError):
                # No usable checkpoint file: start from scratch
                pass
        else:
            # Not resuming: remove any stale checkpoint/progress files
            try:
                os.remove(self.checkpoint_filename())
                os.remove(self.progress_filename())
            except (OSError, TypeError):
                pass
        self.initialize()
Example #8
0
def load_config_file():
    """
    Returns the config info, stored in the config file, or an empty dict if not present.
    """
    # Just-in-time import to avoid recursion
    from cobaya.yaml import yaml_load_file
    try:
        return yaml_load_file(
            os.path.join(get_config_path(), _packages_path_config_file))
    # Best-effort: a missing or unreadable config file just means "no config".
    # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        return {}
Example #9
0
    def __init__(self,
                 info_sampler: SamplerDict,
                 model: Model,
                 output: Optional[Output] = None,
                 packages_path: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Actual initialization of the class. Loads the default and input information and
        call the custom ``initialize`` method.

        [Do not modify this one.]

        :param info_sampler: dict of options for this sampler.
        :param model: model to sample from.
        :param output: output driver (used e.g. to check whether we are resuming).
        :param packages_path: path to external packages.
        :param name: name for this sampler instance.

        NOTE: the signature previously read ``output=Optional[Output]``, which
        made the *default value* the typing construct ``Optional[Output]``
        instead of annotating the parameter; fixed to a proper annotation
        with default ``None``.
        """
        self._model = model
        self._output = output
        # Keep a copy of the input info, to be completed and dumped later
        self._updated_info = deepcopy_where_possible(info_sampler)
        super().__init__(info_sampler,
                         packages_path=packages_path,
                         name=name,
                         initialize=False,
                         standalone=False)
        if not model.parameterization.sampled_params():
            self.mpi_warning("No sampled parameters requested! "
                             "This will fail for non-mock samplers.")
        # Load checkpoint info, if resuming (never for the minimizer)
        if self.output.is_resuming() and not isinstance(self, Minimizer):
            checkpoint_info = None
            # Only the main process reads the checkpoint file from disk
            if mpi.is_main_process():
                try:
                    checkpoint_info = yaml_load_file(
                        self.checkpoint_filename())

                    if self.get_name() not in checkpoint_info["sampler"]:
                        raise LoggedError(
                            self.log, "Checkpoint file found at '%s' "
                            "but it corresponds to a different sampler.",
                            self.checkpoint_filename())
                except (IOError, TypeError):
                    # No usable checkpoint file: start from scratch
                    pass
            # Broadcast the (possibly None) checkpoint info to all processes
            checkpoint_info = mpi.share_mpi(checkpoint_info)
            if checkpoint_info:
                self.set_checkpoint_info(checkpoint_info)
                self.mpi_info("Resuming from previous sample!")
        elif not isinstance(self, Minimizer) and mpi.is_main_process():
            # Not resuming: remove any stale checkpoint/progress files
            try:
                output.delete_file_or_folder(self.checkpoint_filename())
                output.delete_file_or_folder(self.progress_filename())
            except (OSError, TypeError):
                pass
        self._set_rng()
        self.initialize()
        model.set_cache_size(self._get_requested_cache_size())
        # Add to the updated info some values which are
        # only available after initialisation
        self._updated_info["version"] = self.get_version()
Example #10
0
def test_external():
    """Generate mock data/covariance and run cobaya from the example yaml."""
    os.chdir(base_dir)
    make_data_covariance(data_fn=data_fn,
                         covariance_fn=covariance_fn,
                         mapping_proj=['ell_0', 'ell_2', 'ell_4'])
    run_info = yaml_load_file('./test_cobaya.yaml')
    updated, mcmc_sampler = run(run_info)
    assert 'a' in updated['params']
    assert 'sample' in mcmc_sampler.products()
Example #11
0
 def reload_updated_info(self, cache=False, use_cache=False):
     """Reload the updated info from the yaml file, optionally caching it."""
     previously_cached = getattr(self, "_old_updated_info", None)
     if use_cache and previously_cached:
         return previously_cached
     try:
         loaded = yaml_load_file(self.file_updated)
     except IOError:
         # No previous run to reload from
         if cache:
             self._old_updated_info = None
         return None
     if cache:
         self._old_updated_info = loaded
     # Return a copy so the caller cannot mutate the cached version
     return deepcopy_where_possible(loaded)
Example #12
0
def test_H0_docs(packages_path, skip_not_installed):
    """Check the custom H0 likelihood example shipped with the docs."""
    yaml_path = os.path.join(
        os.path.dirname(__file__), "../docs/src_examples/H0/custom_likelihood.yaml")
    like_info = yaml_load_file(yaml_path)
    like_name = list(like_info[kinds.likelihood])[0]
    # Substitute the fiducial values into the external likelihood source
    external = like_info[kinds.likelihood][like_name]["external"]
    external = external.replace("mu_H0", str(fiducial_H0))
    external = external.replace("sigma_H0", str(fiducial_H0_std))
    like_info[kinds.likelihood][like_name]["external"] = external
    body_of_test(packages_path, like_info=like_info[kinds.likelihood],
                 skip_not_installed=skip_not_installed)
def test_example():
    """
    Runs the quickstart doc scripts "as is" and checks consistency of the
    infos and of the sampled mean/covariance (within ``KL_tolerance``).
    """
    # temporarily change working directory to be able to run the files "as is"
    cwd = os.getcwd()
    try:
        os.chdir(docs_src_folder)
        info_yaml = yaml_load_file("gaussian.yaml")
        info_yaml.pop("output")
        globals_example = {}
        exec(
            open(os.path.join(docs_src_folder, "create_info.py")).read(),
            globals_example)
        assert is_equal_info(info_yaml, globals_example["info"]), (
            "Inconsistent info between yaml and interactive.")
        exec(
            open(os.path.join(docs_src_folder, "load_info.py")).read(),
            globals_example)
        globals_example["info_from_yaml"].pop("output")
        assert is_equal_info(info_yaml, globals_example["info_from_yaml"]), (
            "Inconsistent info between interactive and *loaded* yaml.")
        # Run the chain. NOTE(review): the original comment claimed a constant
        # seed, but none is set here -- results are only checked below within
        # KL_tolerance.
        globals_example["info"]["sampler"]["mcmc"] = (
            globals_example["info"]["sampler"]["mcmc"] or {})
        exec(
            open(os.path.join(docs_src_folder, "run.py")).read(),
            globals_example)
        # Run the minimizer -- output doesn't matter. Just checking that it does not fail
        exec(
            open(os.path.join(docs_src_folder, "run_min.py")).read(),
            globals_example)
        # Analyze and plot -- capture print output
        stream = StringIO()
        with stdout_redirector(stream):
            exec(
                open(os.path.join(docs_src_folder, "analyze.py")).read(),
                globals_example)
        # Checking results
        mean, covmat = [
            globals_example["info"]["likelihood"]["gaussian_mixture"][x]
            for x in ["means", "covs"]
        ]
        assert (KL_norm(m1=mean,
                        S1=covmat,
                        m2=globals_example["mean"],
                        S2=globals_example["covmat"]) <= KL_tolerance
                ), ("Sampling appears not to have worked too well. Run again?")
    finally:
        # Back to the working directory of the tests, just in case
        os.chdir(cwd)
Example #14
0
def get_default_info(module, kind):
    """Load a module's defaults file, or an empty skeleton if it has none."""
    defaults_path = os.path.join(get_folder(module, kind), module + ".yaml")
    try:
        info = yaml_load_file(defaults_path)
    except IOError:
        # probably an external module: no bundled defaults file
        info = {kind: {module: {}}}
        log.debug("Module %s:%s does not have a defaults file. " % (kind, module) +
                  "Maybe it is an external module.")
    # The defaults file must nest its options under kind -> module
    try:
        info[kind][module]
    except KeyError:
        log.error("The defaults file for '%s' should be structured "
                  "as %s:%s:{[options]}.", module, kind, module)
        raise HandledException
    return info
def get_covmat_database(modules, cached=True):
    """
    Builds (or loads from cache) the covariance matrix database for the
    installed modules.

    The database maps each installed covmat folder to a list of entries
    ``{"name": <filename>, "params": <params read from the file header>}``.

    :param modules: path to the external modules folder.
    :param cached: try to load a previously dumped database, and dump the
        (re)created one back to disk.
    """
    # Get folders with corresponding modules installed
    installed_folders = [
        folder for folder in covmat_folders
        if os.path.exists(folder.format(**{_path_install: modules}))
    ]
    covmats_database_fullpath = os.path.join(modules, _covmats_file)
    # Check if there is a usable cached one
    if cached:
        try:
            covmat_database = yaml_load_file(covmats_database_fullpath)
            # Usable only if it covers exactly the currently installed folders.
            # (Was an `assert`, which would be skipped under `python -O`.)
            if set(covmat_database) == set(installed_folders):
                return covmat_database
        except Exception:
            # Missing or corrupt cache: fall through and re-create it
            pass
        log.info(
            "No cached covmat database present, not usable or not up-to-date. "
            "Will be re-created and cached.")
    # Create it (again)
    covmat_database = odict()
    for folder in installed_folders:
        covmat_database[folder] = []
        folder_full = folder.format(**{
            _path_install: modules
        }).replace("/", os.sep)
        for filename in os.listdir(folder_full):
            try:
                with open(os.path.join(folder_full, filename)) as covmat:
                    header = covmat.readline()
            except (IOError, OSError, UnicodeDecodeError):
                # Unreadable entry (e.g. a directory or binary file): skip it
                continue
            # Covmat files start with a commented header listing the params
            if not header.strip().startswith("#"):
                continue
            params = header.strip().lstrip("#").split()
            covmat_database[folder].append({
                "name": filename,
                "params": params
            })
    if cached:
        yaml_dump_file(covmats_database_fullpath,
                       covmat_database,
                       error_if_exists=False)
    return covmat_database
Example #16
0
def load_input(input_file):
    """
    Loads general info, and splits it into the right parts.

    :param input_file: path to a ``.yaml``/``.yml`` input file
        (extension matched case-insensitively).
    :return: the loaded info dict, with the output prefix filled in.
    :raises HandledException: if the file extension is not recognized.
    """
    file_name, extension = os.path.splitext(input_file)
    file_name = os.path.basename(file_name)
    # Match the extension case-insensitively (e.g. ".YAML"), as the newer
    # version of this function does
    if extension.lower() not in (".yaml", ".yml"):
        log.error("Extension of input file '%s' not recognized.", input_file)
        raise HandledException
    info = yaml_load_file(input_file) or {}
    # if output_prefix not defined, default to input_file name (sans ext.) as prefix;
    if _output_prefix not in info:
        info[_output_prefix] = file_name
    # warn if no output, since we are in shell-invocation mode.
    elif info[_output_prefix] is None:
        log.warning("WARNING: Output explicitly suppressed with 'output_prefix: null'")
    # contained? Ensure that output is sent where it should
    if "CONTAINED" in os.environ:
        for out in [_output_prefix, _debug_file]:
            if info.get(out):
                if not info[out].startswith("/"):
                    info[out] = os.path.join(_products_path, info[out])
    return info
Example #17
0
    def __init__(self,
                 info_sampler,
                 model,
                 output=None,
                 packages_path=None,
                 name=None):
        """
        Actual initialization of the class. Loads the default and input information and
        call the custom ``initialize`` method.

        [Do not modify this one.]

        :param info_sampler: dict of options for this sampler.
        :param model: model to sample from.
        :param output: output driver (used e.g. to check whether we are resuming).
        :param packages_path: path to external packages.
        :param name: name for this sampler instance.
        """
        self.model = model
        self.output = output
        # Keep a copy of the input info, to be completed and dumped later
        self._updated_info = deepcopy_where_possible(info_sampler)
        super().__init__(info_sampler,
                         packages_path=packages_path,
                         name=name,
                         initialize=False,
                         standalone=False)
        # Seed, if requested
        if getattr(self, "seed", None) is not None:
            if not isinstance(self.seed,
                              int) or not (0 <= self.seed <= 2**32 - 1):
                raise LoggedError(
                    self.log,
                    "Seeds must be a *positive integer* < 2**32 - 1, "
                    "but got %r with type %r", self.seed, type(self.seed))
            # MPI-awareness: sum the rank to the seed
            if more_than_one_process():
                self.seed += get_mpi_rank()
            self.mpi_warning("This run has been SEEDED with seed %d",
                             self.seed)
        # Load checkpoint info, if resuming (never for the minimizer)
        if self.output.is_resuming() and not isinstance(self, Minimizer):
            try:
                checkpoint_info = yaml_load_file(self.checkpoint_filename())
                try:
                    # Restore the sampler state saved in the checkpoint file
                    for k, v in checkpoint_info[kinds.sampler][
                            self.get_name()].items():
                        setattr(self, k, v)
                    self.mpi_info("Resuming from previous sample!")
                except KeyError:
                    # Checkpoint exists but belongs to another sampler
                    if is_main_process():
                        raise LoggedError(
                            self.log, "Checkpoint file found at '%s' "
                            "but it corresponds to a different sampler.",
                            self.checkpoint_filename())
            except (IOError, TypeError):
                # No usable checkpoint file: start from scratch
                pass
        else:
            # Not resuming: remove any stale checkpoint/progress files
            try:
                os.remove(self.checkpoint_filename())
                os.remove(self.progress_filename())
            except (OSError, TypeError):
                pass
        self._set_rng()
        self.initialize()
        self._release_rng()
        self.model.set_cache_size(self._get_requested_cache_size())
        # Add to the updated info some values which are
        # only available after initialisation
        self._updated_info[_version] = self.get_version()
Example #18
0
# Load the run configuration from its yaml file and launch the sampler.
from cobaya.yaml import yaml_load_file
from cobaya.run import run

info_from_yaml = yaml_load_file("lcdm_lmax800.yaml")

updated_info, sampler = run(info_from_yaml)
Example #19
0
# Three standalone doc scripts: each loads a yaml input and runs the sampler.
from cobaya.yaml import yaml_load_file

info_from_yaml = yaml_load_file("omegak_lite.yaml")

from cobaya.run import run

updated_info, sampler = run(info_from_yaml)
from cobaya.yaml import yaml_load_file

info_from_yaml = yaml_load_file("planck_lcdm.yaml")

from cobaya.run import run

# run() returns (updated_info, sampler): unpack both, like the sibling
# snippets do (previously `sampler = run(...)` left `sampler` holding the
# whole tuple).
updated_info, sampler = run(info_from_yaml)

from cobaya.yaml import yaml_load_file

info_from_yaml = yaml_load_file("omegak_lowEE+lowTT+highTTTEEE.yaml")

from cobaya.run import run

updated_info, sampler = run(info_from_yaml)
Example #22
0
    def get_defaults(cls,
                     return_yaml=False,
                     yaml_expand_defaults=True,
                     input_options=empty_dict):
        """
        Return defaults for this component_or_class, with syntax:

        .. code::

           option: value
           [...]

           params:
             [...]  # if required

           prior:
             [...]  # if required

        If keyword `return_yaml` is set to True, it returns literally that,
        whereas if False (default), it returns the corresponding Python dict.

        Note that in external components installed as zip_safe=True packages files cannot
        be accessed directly.
        In this case using !default .yaml includes currently does not work.

        Also note that if you return a dictionary it may be modified (return a deep copy
        if you want to keep it).

        If `yaml_expand_defaults`, then ``!default:`` file includes will be expanded.

        `input_options` may be a dictionary of input options, e.g. in case default params
        are dynamically dependent on an input variable.
        """
        # 'class_options' is no longer supported: fail early with guidance
        if 'class_options' in cls.__dict__:
            raise LoggedError(
                log, "class_options (in %s) should now be replaced by "
                "public attributes defined directly in the class" %
                cls.get_qualified_class_name())
        yaml_text = cls.get_associated_file_content('.yaml')
        options = cls.get_class_options(input_options=input_options)
        # Defaults come from either a .yaml file or class attributes, never both
        if options and yaml_text:
            raise LoggedError(
                log, "%s: any class can either have .yaml or class variables "
                "but not both (type declarations without values are fine "
                "also with yaml file). You have class attributes: %s",
                cls.get_qualified_class_name(), list(options))
        if return_yaml and not yaml_expand_defaults:
            return yaml_text or ""
        this_defaults = yaml_load_file(cls.get_yaml_file(), yaml_text) \
            if yaml_text else deepcopy_where_possible(options)
        # start with this one to keep the order such that most recent class options
        # near the top. Update below to actually override parameters with these.
        defaults = this_defaults.copy()
        if not return_yaml:
            # Merge in the defaults of the parent classes (recursively)
            for base in cls.__bases__:
                if issubclass(base, HasDefaults) and base is not HasDefaults:
                    defaults.update(
                        base.get_defaults(input_options=input_options))
        defaults.update(this_defaults)
        if return_yaml:
            return yaml_dump(defaults)
        else:
            return defaults
Example #23
0
# Two standalone doc scripts: each loads a yaml input and runs the sampler.
from cobaya.yaml import yaml_load_file
from cobaya.run import run

info_from_yaml = yaml_load_file("lite_test.yaml")

updated_info, sampler = run(info_from_yaml)

info_from_yaml = yaml_load_file("lcdm_highTT+lowEE.yaml")

updated_info, sampler = run(info_from_yaml)
Example #25
0
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             read_only=False,
             interactive=False,
             install_reqs_at=None,
             install_reqs_force=None):
    """
    Creates a grid of cobaya input files under ``batchPath``, as described by
    a settings dict (or a settings yaml file), optionally installing the
    required code and data first.

    :param batchPath: root folder where the grid is created.
    :param settingName: name of a yaml settings file (used if ``settings``
        is not given; re-using a previous batch is not implemented yet).
    :param settings: settings dict, containing the defaults and a "grid"
        entry with "models" and "datasets" definitions.
    :param read_only: only reconfigure the batch, keeping existing chains.
    :param interactive: if True, print the run command instead of returning
        the batch.
    :param install_reqs_at: path where to install required code and data.
    :param install_reqs_force: force (re)installation of requisites.
    """
    print("Generating grid...")
    batchPath = os.path.abspath(batchPath) + os.sep
    if not settings:
        if not settingName:
            raise NotImplementedError(
                "Re-using previous batch is work in progress...")
        #            if not pathIsGrid(batchPath):
        #                raise Exception('Need to give name of setting file if batchPath/config '
        #                                'does not exist')
        #            read_only = True
        #            sys.path.insert(0, batchPath + 'config')
        #            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        elif os.path.splitext(settingName)[-1].lower() in _yaml_extensions:
            settings = yaml_load_file(settingName)
        else:
            raise NotImplementedError(
                "Using a python script is work in progress...")
            # In this case, info-as-dict would be passed
            # settings = __import__(settingName, fromlist=['dummy'])
    batch = batchjob.BatchJob(batchPath)
    # batch.skip = settings.get("skip", False)
    batch.makeItems(settings, messages=not read_only)
    if read_only:
        # Keep only the items whose chains already exist
        for jobItem in [b for b in batch.jobItems]:
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' %
              (len(batch.jobItems)))
        return batch
    else:
        batch.makeDirectories(setting_file=None)
        batch.save()
    infos = {}
    components_used = {}
    # Default info
    defaults = copy.deepcopy(settings)
    grid_definition = defaults.pop("grid")
    models_definitions = grid_definition["models"]
    datasets_definitions = grid_definition["datasets"]
    # First pass: build the combined input info for every (model, dataset) job
    for jobItem in batch.items(wantSubItems=False):
        # Model info
        jobItem.makeChainPath()
        try:
            model_info = copy.deepcopy(models_definitions[jobItem.param_set]
                                       or {})
        except KeyError:
            raise ValueError("Model '%s' must be defined." % jobItem.param_set)
        model_info = merge_info(defaults, model_info)
        # Dataset info
        try:
            dataset_info = copy.deepcopy(
                datasets_definitions[jobItem.data_set.tag])
        except KeyError:
            raise ValueError("Data set '%s' must be defined." %
                             jobItem.data_set.tag)
        # Combined info
        combined_info = merge_info(defaults, model_info, dataset_info)
        if "preset" in combined_info:
            preset = combined_info.pop("preset")
            combined_info = merge_info(create_input(**preset), combined_info)
        combined_info[_output_prefix] = jobItem.chainRoot
        # Requisites
        components_used = get_used_components(components_used, combined_info)
        if install_reqs_at:
            combined_info[_packages_path] = os.path.abspath(install_reqs_at)
        # Save the info (we will write it after installation:
        # we need to install to add auto covmats
        if jobItem.param_set not in infos:
            infos[jobItem.param_set] = {}
        infos[jobItem.param_set][jobItem.data_set.tag] = combined_info
    # Installing requisites
    if install_reqs_at:
        print("Installing required code and data for the grid.")
        from cobaya.log import logger_setup
        logger_setup()
        install_reqs(components_used,
                     path=install_reqs_at,
                     force=install_reqs_force)
    print("Adding covmats (if necessary) and writing input files")
    # Second pass: attach covariance matrices and dump each job's input file
    for jobItem in batch.items(wantSubItems=False):
        info = infos[jobItem.param_set][jobItem.data_set.tag]
        # Covariance matrices
        # We try to find them now, instead of at run time, to check if correctly selected
        try:
            sampler = list(info[kinds.sampler])[0]
        except KeyError:
            raise ValueError("No sampler has been chosen")
        if sampler == "mcmc" and info[kinds.sampler][sampler].get(
                "covmat", "auto"):
            packages_path = install_reqs_at or info.get(_packages_path, None)
            if not packages_path:
                raise ValueError(
                    "Cannot assign automatic covariance matrices because no "
                    "external packages path has been defined.")
            # Need updated info for covmats: includes renames
            updated_info = update_info(info)
            # Ideally, we use slow+sampled parameters to look for the covariance matrix
            # but since for that we'd need to initialise a model, we approximate that set
            # as theory+sampled
            from itertools import chain
            like_params = set(
                chain(*[
                    list(like[_params])
                    for like in updated_info[kinds.likelihood].values()
                ]))
            params_info = {
                p: v
                for p, v in updated_info[_params].items()
                if is_sampled_param(v) and p not in like_params
            }
            best_covmat = _get_best_covmat(os.path.abspath(packages_path),
                                           params_info,
                                           updated_info[kinds.likelihood])
            info[kinds.sampler][sampler]["covmat"] = os.path.join(
                best_covmat["folder"], best_covmat["name"])
        # Write the info for this job
        # Allow overwrite since often will want to regenerate grid with tweaks
        yaml_dump_file(jobItem.iniFile(),
                       sort_cosmetic(info),
                       error_if_exists=False)

        # Non-translated old code
        # if not start_at_bestfit:
        #     setMinimize(jobItem, ini)
        #     variant = '_minimize'
        #     ini.saveFile(jobItem.iniFile(variant))
        ## NOT IMPLEMENTED: start at best fit
        ##        ini.params['start_at_bestfit'] = start_at_bestfit
        # ---
        # for deffile in settings.defaults:
        #    ini.defaults.append(batch.commonPath + deffile)
        # if hasattr(settings, 'override_defaults'):
        #    ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults
        # ---
        # # add ini files for importance sampling runs
        # for imp in jobItem.importanceJobs():
        #     if getattr(imp, 'importanceFilter', None): continue
        #     if batch.hasName(imp.name.replace('_post', '')):
        #         raise Exception('importance sampling something you already have?')
        #     for minimize in (False, True):
        #         if minimize and not getattr(imp, 'want_minimize', True): continue
        #         ini = IniFile()
        #         updateIniParams(ini, imp.importanceSettings, batch.commonPath)
        #         if cosmomcAction == 0 and not minimize:
        #             for deffile in settings.importanceDefaults:
        #                 ini.defaults.append(batch.commonPath + deffile)
        #             ini.params['redo_outroot'] = imp.chainRoot
        #             ini.params['action'] = 1
        #         else:
        #             ini.params['file_root'] = imp.chainRoot
        #         if minimize:
        #             setMinimize(jobItem, ini)
        #             variant = '_minimize'
        #         else:
        #             variant = ''
        #         ini.defaults.append(jobItem.iniFile())
        #         ini.saveFile(imp.iniFile(variant))
        #         if cosmomcAction != 0: break

    if not interactive:
        return batch
    print('Done... to run do: cobaya-grid-run %s' % batchPath)
Example #26
0
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             read_only=False,
             interactive=False,
             install_reqs_at=None,
             install_reqs_force=None):
    """
    Create a grid of Cobaya input files under ``batchPath``, one job per
    model/dataset combination defined in the grid settings.

    :param batchPath: root folder for the grid (made absolute, with a
        trailing path separator appended).
    :param settingName: name of a ``.yml``/``.yaml`` settings file, used only
        when ``settings`` is not given. Re-using a previous batch, or loading
        settings from a python script, is not implemented yet and raises
        ``NotImplementedError``.
    :param settings: grid-definition dict. Must contain a ``grid`` key with
        ``models`` and ``datasets`` definitions; every other key is treated
        as a common default merged into each job's input.
    :param read_only: if True, keep only job items whose chains already
        exist, re-save the batch and return it without writing anything new.
    :param interactive: if False (default), return the ``batchJob`` object;
        if True, only print the command needed to run the grid.
    :param install_reqs_at: optional path at which to install the code/data
        required by the jobs; also recorded in each job's input file.
    :param install_reqs_force: whether to force re-installation of
        requisites (passed through to the installer).
    :return: the configured ``batchJob`` instance (``None`` when
        ``interactive`` is True and the grid was written).
    :raises ValueError: if a job references a model or dataset that is not
        defined in the grid settings.
    """
    batchPath = os.path.abspath(batchPath) + os.sep
    #    # 0: chains, 1: importance sampling, 2: best-fit, 3: best-fit and Hessian
    #    cosmomcAction = 0
    if not settings:
        if not settingName:
            raise NotImplementedError(
                "Re-using previous batch is work in progress...")
#            if not pathIsGrid(batchPath):
#                raise Exception('Need to give name of setting file if batchPath/config '
#                                'does not exist')
#            read_only = True
#            sys.path.insert(0, batchPath + 'config')
#            sys.modules['batchJob'] = batchjob  # old name
#            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        elif os.path.splitext(settingName)[-1].lower() in (".yml", ".yaml"):
            settings = yaml_load_file(settingName)
        else:
            # ACTUALLY, in the scripted case a DICT or a YAML FILE NAME should be passed
            raise NotImplementedError(
                "Using a python script is work in progress...")
#            settings = __import__(settingName, fromlist=['dummy'])
    # Local import to avoid a circular dependency at module load time.
    from cobaya.grid_tools import batchjob
    batch = batchjob.batchJob(batchPath, settings.get("yaml_dir", None))
    ###    batch.skip = settings.get("skip", False)
    if "skip" in settings:
        raise NotImplementedError("Skipping not implemented yet.")
    batch.makeItems(settings, messages=not read_only)
    if read_only:
        # Iterate over a copy, since items without chains are removed in place.
        for jobItem in list(batch.jobItems):
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' %
              (len(batch.jobItems)))
        return batch
    else:
        # WAS        batch.makeDirectories(settings.__file__)
        # WHY THE DIR OF settings AND NOT THE GRID DIR GIVEN???
        batch.makeDirectories(setting_file=None)
        batch.save()

# NOT IMPLEMENTED YET: start at best fit!!!
#    start_at_bestfit = getattr(settings, 'start_at_bestfit', False)

    # Everything in the settings except the "grid" definition itself acts as
    # a common default, merged into every job's input below.
    defaults = copy.deepcopy(settings)
    modules_used = {}
    grid_definition = defaults.pop("grid")
    models_definitions = grid_definition["models"]
    datasets_definitions = grid_definition["datasets"]
    for jobItem in batch.items(wantSubItems=False):
        jobItem.makeChainPath()
        base_info = copy.deepcopy(defaults)
        try:
            model_info = models_definitions[jobItem.param_set] or {}
        except KeyError:
            raise ValueError("Model '%s' must be defined." % jobItem.param_set)

        # COVMATS NOT IMPLEMENTED YET!!!
        # cov_dir_name = getattr(settings, 'cov_dir', 'planck_covmats')
        # covdir = os.path.join(batch.basePath, cov_dir_name)
        # covmat = os.path.join(covdir, jobItem.name + '.covmat')
        # if not os.path.exists(covmat):
        #     covNameMappings = getattr(settings, 'covNameMappings', None)
        #     mapped_name_norm = jobItem.makeNormedName(covNameMappings)[0]
        #     covmat_normed = os.path.join(covdir, mapped_name_norm + '.covmat')
        #     covmat = covmat_normed
        #     if not os.path.exists(covmat) and hasattr(jobItem.data_set,
        #                                               'covmat'): covmat = batch.basePath + jobItem.data_set.covmat
        #     if not os.path.exists(covmat) and hasattr(settings, 'covmat'): covmat = batch.basePath + settings.covmat
        # else:
        #     covNameMappings = None
        # if os.path.exists(covmat):
        #     ini.params['propose_matrix'] = covmat
        #     if getattr(settings, 'newCovmats', True): ini.params['MPI_Max_R_ProposeUpdate'] = 20
        # else:
        #     hasCov = False
        #     ini.params['MPI_Max_R_ProposeUpdate'] = 20
        #     covmat_try = []
        #     if 'covRenamer' in dir(settings):
        #         covmat_try += settings.covRenamer(jobItem.name)
        #         covmat_try += settings.covRenamer(mapped_name_norm)
        #     if hasattr(settings, 'covrenames'):
        #         for aname in [jobItem.name, mapped_name_norm]:
        #             covmat_try += [aname.replace(old, new, 1) for old, new in settings.covrenames if old in aname]
        #             for new1, old1 in settings.covrenames:
        #                 if old1 in aname:
        #                     name = aname.replace(old1, new1, 1)
        #                     covmat_try += [name.replace(old, new, 1) for old, new in settings.covrenames if old in name]
        #     if 'covWithoutNameOrder' in dir(settings):
        #         if covNameMappings:
        #             removes = copy.deepcopy(covNameMappings)
        #         else:
        #             removes = dict()
        #         for name in settings.covWithoutNameOrder:
        #             if name in jobItem.data_set.names:
        #                 removes[name] = ''
        #                 covmat_try += [jobItem.makeNormedName(removes)[0]]
        #     covdir2 = os.path.join(batch.basePath, getattr(settings, 'cov_dir_fallback', cov_dir_name))
        #     for name in covmat_try:
        #         covmat = os.path.join(batch.basePath, covdir2, name + '.covmat')
        #         if os.path.exists(covmat):
        #             ini.params['propose_matrix'] = covmat
        #             print('covmat ' + jobItem.name + ' -> ' + name)
        #             hasCov = True
        #             break
        #     if not hasCov: print('WARNING: no matching specific covmat for ' + jobItem.name)

        ## NOT IMPLEMENTED: start at best fit
        ##        ini.params['start_at_bestfit'] = start_at_bestfit

        try:
            dataset_info = datasets_definitions[jobItem.data_set.tag]
        except KeyError:
            raise ValueError("Data set '%s' must be defined." %
                             jobItem.data_set.tag)
        # Later dicts take precedence: dataset overrides model overrides defaults.
        combined_info = merge_info(base_info, model_info, dataset_info)
        combined_info[_output_prefix] = jobItem.chainRoot

        # ???
        #        for deffile in settings.defaults:
        #            ini.defaults.append(batch.commonPath + deffile)
        #        if hasattr(settings, 'override_defaults'):
        #            ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults

        # requisites: accumulate the modules required across all jobs,
        # so they can be installed once at the end.
        modules_used = get_modules(modules_used, combined_info)
        if install_reqs_at:
            combined_info[_path_install] = os.path.abspath(install_reqs_at)
        # Write the info for this job
        yaml_dump_file(combined_info, jobItem.iniFile())

        # if not start_at_bestfit:
        #     setMinimize(jobItem, ini)
        #     variant = '_minimize'
        #     ini.saveFile(jobItem.iniFile(variant))

        # # add ini files for importance sampling runs
        # for imp in jobItem.importanceJobs():
        #     if getattr(imp, 'importanceFilter', None): continue
        #     if batch.hasName(imp.name.replace('_post', '')):
        #         raise Exception('importance sampling something you already have?')
        #     for minimize in (False, True):
        #         if minimize and not getattr(imp, 'want_minimize', True): continue
        #         ini = IniFile()
        #         updateIniParams(ini, imp.importanceSettings, batch.commonPath)
        #         if cosmomcAction == 0 and not minimize:
        #             for deffile in settings.importanceDefaults:
        #                 ini.defaults.append(batch.commonPath + deffile)
        #             ini.params['redo_outroot'] = imp.chainRoot
        #             ini.params['action'] = 1
        #         else:
        #             ini.params['file_root'] = imp.chainRoot
        #         if minimize:
        #             setMinimize(jobItem, ini)
        #             variant = '_minimize'
        #         else:
        #             variant = ''
        #         ini.defaults.append(jobItem.iniFile())
        #         ini.saveFile(imp.iniFile(variant))
        #         if cosmomcAction != 0: break

    # Installing requisites
    # BUGFIX: the announcement used to print unconditionally, even when no
    # installation path was given and nothing was installed.
    if install_reqs_at:
        print("Installing required code and data for the grid.")
        install_reqs(modules_used,
                     path=install_reqs_at,
                     force=install_reqs_force)
    if not interactive:
        return batch
    print('Done... to run do: cobaya-grid-run %s' % batchPath)
from cobaya.yaml import yaml_load_file

# Parse the YAML input file into the Python dict ("info") that Cobaya
# expects (theory, likelihoods, params, sampler, ...).
info_from_yaml = yaml_load_file(
    "omegak_lowEE+lowTT+highTTTEEE+Pantheon+lens.yaml")

from cobaya.run import run

# Run the job described by the info dict; unpacks into the fully-updated
# input info and the sampler instance (which holds the products).
updated_info, sampler = run(info_from_yaml)
# Example #28
# 0
from cobaya.yaml import yaml_load_file

# Parse the YAML input file into the Python dict ("info") that Cobaya uses.
info_from_yaml = yaml_load_file("gaussian.yaml")
# Example #29
# 0
 def reload_updated_info(self):
     """Re-read the updated info from the output YAML file and return it."""
     updated_path = self.file_updated
     return yaml_load_file(updated_path)
from cobaya.yaml import yaml_load_file

# Parse the YAML input file into the Python dict ("info") that Cobaya uses.
info_from_yaml = yaml_load_file("log10ac_v2.yaml")

from cobaya.run import run

# Run the job described by the info dict.
# NOTE(review): this binds the entire return of `run` to a single name,
# whereas the earlier example unpacks `updated_info, sampler` — confirm
# which return convention the installed cobaya version uses.
sampler = run(info_from_yaml)