Example #1
    def refresh_display(self, info):
        QApplication.setOverrideCursor(Qt.WaitCursor)
        try:
            comments = info.pop(input_database._comment, None)
            comments_text = "\n# " + "\n# ".join(comments)
        except (
                TypeError,  # No comments
                AttributeError
        ):  # Failed to generate info (returned str instead)
            comments_text = ""
        self.display["python"].setText("info = " + pformat(info) +
                                       comments_text)
        self.display["yaml"].setText(
            yaml_dump(sort_cosmetic(info)) + comments_text)
        self.display["bibliography"].setText(
            prettyprint_bib(*get_bib_info(info)))
        # Display covmat
        packages_path = resolve_packages_path()
        if not packages_path:
            self.covmat_text.setText(
                "\nIn order to find a covariance matrix, you need to define an external "
                "packages installation path, e.g. via the env variable %r.\n" %
                _packages_path_env)
        elif any(not os.path.isdir(d.format(**{_packages_path: packages_path}))
                 for d in covmat_folders):
            self.covmat_text.setText(
                "\nThe external cosmological packages appear not to be installed where "
                "expected: %r\n" % packages_path)
        else:
            covmat_data = get_best_covmat(info, packages_path=packages_path)
            self.current_params_in_covmat = covmat_data["params"]
            self.current_covmat = covmat_data["covmat"]
            _, corrmat = cov_to_std_and_corr(self.current_covmat)
            self.covmat_text.setText(
                "\nCovariance file: %r\n\n"
                "NB: you do *not* need to save or copy this covariance matrix: "
                "it will be selected automatically.\n" % covmat_data["name"])
            self.covmat_table.setRowCount(len(self.current_params_in_covmat))
            self.covmat_table.setColumnCount(len(
                self.current_params_in_covmat))
            self.covmat_table.setHorizontalHeaderLabels(
                list(self.current_params_in_covmat))
            self.covmat_table.setVerticalHeaderLabels(
                list(self.current_params_in_covmat))
            for i, pi in enumerate(self.current_params_in_covmat):
                for j, pj in enumerate(self.current_params_in_covmat):
                    self.covmat_table.setItem(
                        i, j,
                        QTableWidgetItem("%g" % self.current_covmat[i, j]))
                    if i != j:
                        # Scale RGB from [0, 1] up to the 0-255 range;
                        # 255.99 (not 256) keeps a correlation of exactly 1
                        # from overflowing the valid QColor range.
                        color = [
                            255.99 * c
                            for c in cmap_corr(corrmat[i, j] / 2 + 0.5)[:3]
                        ]
                    else:
                        color = [255.99] * 3
                    self.covmat_table.item(i, j).setBackground(QColor(*color))
                    self.covmat_table.item(i, j).setForeground(QColor(0, 0, 0))
        QApplication.restoreOverrideCursor()
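
A note on the correlation coloring above: `cov_to_std_and_corr` is expected to split the covariance matrix into standard deviations and a correlation matrix. A minimal plain-numpy sketch of that conversion (the function name matches the call above, but this body is an assumption, not cobaya's implementation):

import numpy as np

def cov_to_std_and_corr(cov):
    # Standard deviations are the square roots of the diagonal entries.
    std = np.sqrt(np.diag(cov))
    # Dividing by the outer product of the std's yields the correlation
    # matrix: ones on the diagonal, entries in [-1, 1] elsewhere.
    corr = cov / np.outer(std, std)
    return std, corr

cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])
std, corr = cov_to_std_and_corr(cov)
print(std)   # [0.2 0.3]
print(corr)  # off-diagonal: 0.01 / (0.2 * 0.3) ≈ 0.167
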
Example #2
def test_cosmo_docs_basic():
    flag = True
    for theo in ["camb", "classy"]:
        info_new = create_input(preset=preset_pre + theo)
        info_yaml_new = yaml_dump(sort_cosmetic(info_new))
        file_path = os.path.join(path, file_pre + theo + ".yaml")
        with open(file_path) as docs_file:
            info_yaml_docs = docs_file.read()
        info_docs = yaml_load(info_yaml_docs)
        if not is_equal_info(
                info_new, info_docs, strict=True, print_not_log=True):
            with open(file_path, "w") as docs_file:
                docs_file.write(info_yaml_new)
            flag = False
            print("OLD:\n%s" % info_yaml_docs)
            print("----------------------------------------")
            print("NEW:\n%s" % info_yaml_new)
            sys.stdout.flush()
    assert flag, ("Differences in example input file. "
                  "Files have been re-generated; check out your git diff.")
Example #3
File: run.py Project: Pablo-Lemos/cobaya
def run(
    info_or_yaml_or_file: Union[InputDict, str, os.PathLike],
    packages_path: Optional[str] = None,
    output: Union[str, LiteralFalse, None] = None,
    debug: Union[bool, int, None] = None,
    stop_at_error: Optional[bool] = None,
    resume: bool = False,
    force: bool = False,
    no_mpi: bool = False,
    test: bool = False,
    override: Optional[InputDict] = None,
) -> Union[InfoSamplerTuple, PostTuple]:
    """
    Run from an input dictionary, file name or yaml string, with optional arguments
    to override settings in the input as needed.

    :param info_or_yaml_or_file: input options dictionary, yaml file, or yaml text
    :param packages_path: path where external packages were installed
    :param output: path name prefix for output files, or False for no file output
    :param debug: true for verbose debug output, or a specific logging level
    :param stop_at_error: stop if an error is raised
    :param resume: continue an existing run
    :param force: overwrite existing output if it exists
    :param no_mpi: run without MPI
    :param test: only test initialization rather than actually running
    :param override: option dictionary to merge into the input one, overriding settings
       (but with lower precedence than the explicit keyword arguments)
    :return: (updated_info, sampler) tuple of options dictionary and Sampler instance,
              or (updated_info, results) if using "post" post-processing
    """

    # This function reproduces the model-->output-->sampler pipeline one would follow
    # when instantiating by hand, but alters the order to perform checks and dump info
    # as early as possible, e.g. to check whether resuming is possible or `force` is needed.
    if no_mpi or test:
        mpi.set_mpi_disabled()

    with mpi.ProcessState("run"):
        info: InputDict = load_info_overrides(info_or_yaml_or_file, debug,
                                              stop_at_error, packages_path,
                                              override)

        if test:
            info["test"] = True
        # If resume or force is given as a cmd arg, ignore those in the input file
        if resume or force:
            if resume and force:
                raise ValueError("'rename' and 'force' are exclusive options")
            info["resume"] = bool(resume)
            info["force"] = bool(force)
        if info.get("post"):
            if isinstance(output, str) or output is False:
                info["post"]["output"] = output or None
            return post(info)

        if isinstance(output, str) or output is False:
            info["output"] = output or None
        logger_setup(info.get("debug"), info.get("debug_file"))
        logger_run = get_logger(run.__name__)
        # MARKED FOR DEPRECATION IN v3.0
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        check_deprecated_modules_path(info)
        # END OF DEPRECATION BLOCK
        # 1. Prepare output driver, if requested by defining an output_prefix
        # GetDist needs to know the original sampler, so don't overwrite it when minimizing
        try:
            which_sampler = list(info["sampler"])[0]
        except (KeyError, TypeError):
            raise LoggedError(
                logger_run,
                "You need to specify a sampler using the 'sampler' key "
                "as e.g. `sampler: {mcmc: None}.`")
        infix = "minimize" if which_sampler == "minimize" else None
        with get_output(prefix=info.get("output"),
                        resume=info.get("resume"),
                        force=info.get("force"),
                        infix=infix) as out:
            # 2. Update the input info with the defaults for each component
            updated_info = update_info(info)
            if is_debug(logger_run):
                # Dump only if not doing output
                # (otherwise, the user can check the .updated file)
                if not out and mpi.is_main_process():
                    logger_run.info(
                        "Input info updated with defaults (dumped to YAML):\n%s",
                        yaml_dump(sort_cosmetic(updated_info)))
            # 3. If output requested, check compatibility with any existing one, and dump.
            # 3.1 First: model only
            out.check_and_dump_info(info,
                                    updated_info,
                                    cache_old=True,
                                    ignore_blocks=["sampler"])
            # 3.2 Then sampler -- first get the last sampler mentioned in the updated.yaml
            # TODO: ideally, using Minimizer would *append* to the sampler block.
            #       Some code already in place, but not possible at the moment.
            try:
                last_sampler = list(updated_info["sampler"])[-1]
                last_sampler_info = {
                    last_sampler: updated_info["sampler"][last_sampler]
                }
            except (KeyError, TypeError):
                raise LoggedError(logger_run, "No sampler requested.")
            sampler_name, sampler_class = get_sampler_name_and_class(
                last_sampler_info)
            check_sampler_info((out.reload_updated_info(use_cache=True)
                                or {}).get("sampler"),
                               updated_info["sampler"],
                               is_resuming=out.is_resuming())
            # Dump again, now including sampler info
            out.check_and_dump_info(info, updated_info, check_compatible=False)
            # Check if resumable run
            sampler_class.check_force_resume(
                out, info=updated_info["sampler"][sampler_name])
            # 4. Initialize the posterior and the sampler
            with Model(updated_info["params"],
                       updated_info["likelihood"],
                       updated_info.get("prior"),
                       updated_info.get("theory"),
                       packages_path=info.get("packages_path"),
                       timing=updated_info.get("timing"),
                       allow_renames=False,
                       stop_at_error=info.get("stop_at_error",
                                              False)) as model:
                # Re-dump the updated info, now containing parameter routes and version
                updated_info = recursive_update(updated_info, model.info())
                out.check_and_dump_info(None,
                                        updated_info,
                                        check_compatible=False)
                sampler = sampler_class(
                    updated_info["sampler"][sampler_name],
                    model,
                    out,
                    name=sampler_name,
                    packages_path=info.get("packages_path"))
                # Re-dump updated info, now also containing updates from the sampler
                updated_info["sampler"][sampler_name] = \
                    recursive_update(updated_info["sampler"][sampler_name],
                                     sampler.info())
                out.check_and_dump_info(None,
                                        updated_info,
                                        check_compatible=False)
                mpi.sync_processes()
                if info.get("test", False):
                    logger_run.info(
                        "Test initialization successful! "
                        "You can probably run now without `--%s`.", "test")
                    return InfoSamplerTuple(updated_info, sampler)
                # Run the sampler
                sampler.run()

    return InfoSamplerTuple(updated_info, sampler)
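
For context, a minimal usage sketch of this function. The likelihood block follows cobaya's quickstart conventions, so treat the exact `gaussian_mixture` options as an assumption rather than a tested input:

from cobaya.run import run

info = {
    "likelihood": {"gaussian_mixture": {"means": [0.2], "covs": [0.1]}},
    "params": {"a": {"prior": {"min": 0, "max": 1},
                     "ref": 0.5, "proposal": 0.01}},
    "sampler": {"mcmc": None},
}
# test=True exercises only the initialization branch shown above and
# returns the (updated_info, sampler) tuple without sampling.
updated_info, sampler = run(info, test=True)
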
Example #4
def run(info):
    # This function reproduces the model-->output-->sampler pipeline one would follow
    # when instantiating by hand, but alters the order to perform checks and dump info
    # as early as possible, e.g. to check whether resuming is possible or `force` is needed.
    assert isinstance(info, Mapping), (
        "The first argument must be a dictionary with the info needed for the run. "
        "If you were trying to pass the name of an input file instead, "
        "load it first with 'cobaya.input.load_input', "
        "or, if you were passing a yaml string, load it with 'cobaya.yaml.yaml_load'."
    )
    logger_setup(info.get(_debug), info.get(_debug_file))
    logger_run = logging.getLogger(__name__.split(".")[-1])
    # MARKED FOR DEPRECATION IN v3.0
    # BEHAVIOUR TO BE REPLACED BY ERROR:
    check_deprecated_modules_path(info)
    # END OF DEPRECATION BLOCK
    # 1. Prepare output driver, if requested by defining an output_prefix
    output = get_output(output_prefix=info.get(_output_prefix),
                        resume=info.get(_resume),
                        force=info.get(_force))
    # 2. Update the input info with the defaults for each component
    updated_info = update_info(info)
    if logging.root.getEffectiveLevel() <= logging.DEBUG:
        # Dump only if not doing output (otherwise, the user can check the .updated file)
        if not output and is_main_process():
            logger_run.info(
                "Input info updated with defaults (dumped to YAML):\n%s",
                yaml_dump(sort_cosmetic(updated_info)))
    # 3. If output requested, check compatibility with any existing one, and dump.
    # 3.1 First: model only
    output.check_and_dump_info(info,
                               updated_info,
                               cache_old=True,
                               ignore_blocks=[kinds.sampler])
    # 3.2 Then sampler -- first get the last sampler mentioned in the updated.yaml
    # TODO: ideally, using Minimizer would *append* to the sampler block.
    #       Some code already in place, but not possible at the moment.
    try:
        last_sampler = list(updated_info[kinds.sampler])[-1]
        last_sampler_info = {
            last_sampler: updated_info[kinds.sampler][last_sampler]
        }
    except (KeyError, TypeError):
        raise LoggedError(logger_run, "No sampler requested.")
    sampler_name, sampler_class = get_sampler_name_and_class(last_sampler_info)
    check_sampler_info((output.reload_updated_info(use_cache=True)
                        or {}).get(kinds.sampler),
                       updated_info[kinds.sampler],
                       is_resuming=output.is_resuming())
    # Dump again, now including sampler info
    output.check_and_dump_info(info, updated_info, check_compatible=False)
    # Check if resumable run
    sampler_class.check_force_resume(
        output, info=updated_info[kinds.sampler][sampler_name])
    # 4. Initialize the posterior and the sampler
    with Model(updated_info[_params], updated_info[kinds.likelihood],
               updated_info.get(_prior), updated_info.get(kinds.theory),
               packages_path=info.get(_packages_path), timing=updated_info.get(_timing),
               allow_renames=False, stop_at_error=info.get("stop_at_error", False)) \
            as model:
        # Re-dump the updated info, now containing parameter routes and version info
        updated_info = recursive_update(updated_info, model.info())
        output.check_and_dump_info(None, updated_info, check_compatible=False)
        sampler = sampler_class(updated_info[kinds.sampler][sampler_name],
                                model,
                                output,
                                packages_path=info.get(_packages_path))
        # Re-dump updated info, now also containing updates from the sampler
        updated_info[kinds.sampler][sampler.get_name()] = \
            recursive_update(
                updated_info[kinds.sampler][sampler.get_name()], sampler.info())
        # TODO -- maybe also re-dump model info, now possibly with measured speeds
        # (waiting until the camb.transfers issue is solved)
        output.check_and_dump_info(None, updated_info, check_compatible=False)
        if info.get(_test_run, False):
            logger_run.info(
                "Test initialization successful! "
                "You can probably run now without `--%s`.", _test_run)
            return updated_info, sampler
        # Run the sampler
        sampler.run()
    return updated_info, sampler
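
The assertion at the top of this (older) version names the loaders to use when you have a file or a yaml string instead of a dictionary. A short sketch of both routes, taking the import paths from that message (the file name is hypothetical):

from cobaya.input import load_input  # per the assertion message above
from cobaya.yaml import yaml_load

info_from_file = load_input("my_input.yaml")  # hypothetical file name
info_from_text = yaml_load("sampler:\n  mcmc: null\n")
# Either resulting dictionary can then be passed to run(info).
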
Example #5
    def check_and_dump_info(self, input_info, updated_info, check_compatible=True,
                            cache_old=False, use_cache_old=False, ignore_blocks=()):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the components' defaults.

        If resuming a sample, checks first that old and new infos and versions are
        consistent.
        """
        # trim known params of each likelihood: for internal use only
        self.check_lock()
        updated_info_trimmed = deepcopy_where_possible(updated_info)
        updated_info_trimmed["version"] = get_version()
        for like_info in updated_info_trimmed.get("likelihood", {}).values():
            (like_info or {}).pop("params", None)
        if check_compatible:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check whether python objects have changed
            try:
                old_info = self.reload_updated_info(cache=cache_old,
                                                    use_cache=use_cache_old)
            except InputImportError:
                # for example, when there's a dynamically generated class that cannot
                # be found by the yaml loader (could use yaml loader that ignores them)
                old_info = None
            if old_info:
                # use consistent yaml read-in types
                # TODO: could probably just compare full infos here, with externals?
                #  for the moment cautiously keeping old behaviour
                old_info = yaml_load(yaml_dump(old_info))  # type: ignore
                new_info = yaml_load(yaml_dump(updated_info_trimmed))
                if not is_equal_info(old_info, new_info, strict=False,
                                     ignore_blocks=list(ignore_blocks) + [
                                         "output"]):
                    raise LoggedError(
                        self.log, "Old and new run information not compatible! "
                                  "Resuming not possible!")
                # Deal with version comparison separately:
                # - If not specified now, take the one used in the resume info
                # - If specified both now and before, check that the new one is
                #   not older than the old one
                # (for Cobaya's own version, always prefer the new one)
                old_version = old_info.get("version")
                new_version = new_info.get("version")
                if isinstance(old_version, str) and isinstance(new_version, str):
                    if version.parse(old_version) > version.parse(new_version):
                        raise LoggedError(
                            self.log, "You are trying to resume a run performed with a "
                                      "newer version of Cobaya: %r (you are using %r). "
                                      "Please, update your Cobaya installation.",
                            old_version, new_version)
                for k in set(kinds).intersection(updated_info):
                    if k in ignore_blocks or updated_info[k] is None:
                        continue
                    for c in updated_info[k]:
                        new_version = updated_info[k][c].get("version")
                        old_version = old_info[k][c].get("version")  # type: ignore
                        if new_version is None:
                            updated_info[k][c]["version"] = old_version
                            updated_info_trimmed[k][c]["version"] = old_version
                        elif old_version is not None:
                            cls = get_resolved_class(
                                c, k, None_if_not_found=True,
                                class_name=updated_info[k][c].get("class"))
                            if cls and cls.compare_versions(
                                    old_version, new_version, equal=False):
                                raise LoggedError(
                                    self.log, "You have requested version %r for "
                                              "%s:%s, but you are trying to resume a "
                                              "run that used a newer version: %r.",
                                    new_version, k, c, old_version)
        # If resuming, we don't want to do *partial* dumps
        if ignore_blocks and self.is_resuming():
            return
        # Work on a copy of the input info, since we are updating the prefix
        # (the updated one is already a copy)
        if input_info is not None:
            input_info = deepcopy_where_possible(input_info)
        # Write the new one
        for f, info in [(self.file_input, input_info),
                        (self.file_updated, updated_info_trimmed)]:
            if info:
                for k in ignore_blocks:
                    info.pop(k, None)
                info.pop("debug", None)
                info.pop("force", None)
                info.pop("resume", None)
                # make sure the dumped output_prefix only contains the file prefix,
                # not the folder, since it's already been placed inside it
                info["output"] = self.updated_prefix()
                with open(f, "w", encoding="utf-8") as f_out:
                    try:
                        f_out.write(yaml_dump(sort_cosmetic(info)))
                    except OutputError as e:
                        raise LoggedError(self.log, str(e))
        if updated_info_trimmed and has_non_yaml_reproducible(updated_info_trimmed):
            try:
                import dill
            except ImportError:
                self.mpi_info('Install "dill" to save reproducible options file.')
            else:
                import pickle
                try:
                    with open(self.dump_file_updated, 'wb') as f:
                        dill.dump(sort_cosmetic(updated_info_trimmed), f,
                                  pickle.HIGHEST_PROTOCOL)
                except pickle.PicklingError as e:
                    os.remove(self.dump_file_updated)
                    self.mpi_info('Options file cannot be pickled %s', e)
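
The final block dumps a dill pickle when the updated info contains options that cannot be reproduced from yaml alone. A minimal sketch of reading such a dump back (the file name is hypothetical; `dump_file_updated` above holds the real path):

import dill

with open("chain.updated.dill", "rb") as f:  # hypothetical file name
    updated_info = dill.load(f)  # counterpart of dill.dump(...) above
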
Example #6
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             read_only=False,
             interactive=False,
             install_reqs_at=None,
             install_reqs_force=None):
    print("Generating grid...")
    batchPath = os.path.abspath(batchPath) + os.sep
    if not settings:
        if not settingName:
            raise NotImplementedError(
                "Re-using previous batch is work in progress...")
        #            if not pathIsGrid(batchPath):
        #                raise Exception('Need to give name of setting file if batchPath/config '
        #                                'does not exist')
        #            read_only = True
        #            sys.path.insert(0, batchPath + 'config')
        #            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        elif os.path.splitext(settingName)[-1].lower() in _yaml_extensions:
            settings = yaml_load_file(settingName)
        else:
            raise NotImplementedError(
                "Using a python script is work in progress...")
            # In this case, info-as-dict would be passed
            # settings = __import__(settingName, fromlist=['dummy'])
    batch = batchjob.BatchJob(batchPath)
    # batch.skip = settings.get("skip", False)
    batch.makeItems(settings, messages=not read_only)
    if read_only:
        for jobItem in [b for b in batch.jobItems]:
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' %
              (len(batch.jobItems)))
        return batch
    else:
        batch.makeDirectories(setting_file=None)
        batch.save()
    infos = {}
    components_used = {}
    # Default info
    defaults = copy.deepcopy(settings)
    grid_definition = defaults.pop("grid")
    models_definitions = grid_definition["models"]
    datasets_definitions = grid_definition["datasets"]
    for jobItem in batch.items(wantSubItems=False):
        # Model info
        jobItem.makeChainPath()
        try:
            model_info = copy.deepcopy(models_definitions[jobItem.param_set]
                                       or {})
        except KeyError:
            raise ValueError("Model '%s' must be defined." % jobItem.param_set)
        model_info = merge_info(defaults, model_info)
        # Dataset info
        try:
            dataset_info = copy.deepcopy(
                datasets_definitions[jobItem.data_set.tag])
        except KeyError:
            raise ValueError("Data set '%s' must be defined." %
                             jobItem.data_set.tag)
        # Combined info
        combined_info = merge_info(defaults, model_info, dataset_info)
        if "preset" in combined_info:
            preset = combined_info.pop("preset")
            combined_info = merge_info(create_input(**preset), combined_info)
        combined_info[_output_prefix] = jobItem.chainRoot
        # Requisites
        components_used = get_used_components(components_used, combined_info)
        if install_reqs_at:
            combined_info[_packages_path] = os.path.abspath(install_reqs_at)
        # Save the info (we will write it after installation:
        # we need to install first to add auto covmats)
        if jobItem.param_set not in infos:
            infos[jobItem.param_set] = {}
        infos[jobItem.param_set][jobItem.data_set.tag] = combined_info
    # Installing requisites
    if install_reqs_at:
        print("Installing required code and data for the grid.")
        from cobaya.log import logger_setup
        logger_setup()
        install_reqs(components_used,
                     path=install_reqs_at,
                     force=install_reqs_force)
    print("Adding covmats (if necessary) and writing input files")
    for jobItem in batch.items(wantSubItems=False):
        info = infos[jobItem.param_set][jobItem.data_set.tag]
        # Covariance matrices
        # We try to find them now, instead of at run time, to check that they are correctly selected
        try:
            sampler = list(info[kinds.sampler])[0]
        except KeyError:
            raise ValueError("No sampler has been chosen")
        if sampler == "mcmc" and info[kinds.sampler][sampler].get(
                "covmat", "auto"):
            packages_path = install_reqs_at or info.get(_packages_path, None)
            if not packages_path:
                raise ValueError(
                    "Cannot assign automatic covariance matrices because no "
                    "external packages path has been defined.")
            # Need updated info for covmats: includes renames
            updated_info = update_info(info)
            # Ideally, we use slow+sampled parameters to look for the covariance matrix
            # but since for that we'd need to initialise a model, we approximate that set
            # as theory+sampled
            from itertools import chain
            like_params = set(
                chain(*[
                    list(like[_params])
                    for like in updated_info[kinds.likelihood].values()
                ]))
            params_info = {
                p: v
                for p, v in updated_info[_params].items()
                if is_sampled_param(v) and p not in like_params
            }
            best_covmat = _get_best_covmat(os.path.abspath(packages_path),
                                           params_info,
                                           updated_info[kinds.likelihood])
            info[kinds.sampler][sampler]["covmat"] = os.path.join(
                best_covmat["folder"], best_covmat["name"])
        # Write the info for this job
        # Allow overwrite since often will want to regenerate grid with tweaks
        yaml_dump_file(jobItem.iniFile(),
                       sort_cosmetic(info),
                       error_if_exists=False)

        # Non-translated old code
        # if not start_at_bestfit:
        #     setMinimize(jobItem, ini)
        #     variant = '_minimize'
        #     ini.saveFile(jobItem.iniFile(variant))
        ## NOT IMPLEMENTED: start at best fit
        ##        ini.params['start_at_bestfit'] = start_at_bestfit
        # ---
        # for deffile in settings.defaults:
        #    ini.defaults.append(batch.commonPath + deffile)
        # if hasattr(settings, 'override_defaults'):
        #    ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults
        # ---
        # # add ini files for importance sampling runs
        # for imp in jobItem.importanceJobs():
        #     if getattr(imp, 'importanceFilter', None): continue
        #     if batch.hasName(imp.name.replace('_post', '')):
        #         raise Exception('importance sampling something you already have?')
        #     for minimize in (False, True):
        #         if minimize and not getattr(imp, 'want_minimize', True): continue
        #         ini = IniFile()
        #         updateIniParams(ini, imp.importanceSettings, batch.commonPath)
        #         if cosmomcAction == 0 and not minimize:
        #             for deffile in settings.importanceDefaults:
        #                 ini.defaults.append(batch.commonPath + deffile)
        #             ini.params['redo_outroot'] = imp.chainRoot
        #             ini.params['action'] = 1
        #         else:
        #             ini.params['file_root'] = imp.chainRoot
        #         if minimize:
        #             setMinimize(jobItem, ini)
        #             variant = '_minimize'
        #         else:
        #             variant = ''
        #         ini.defaults.append(jobItem.iniFile())
        #         ini.saveFile(imp.iniFile(variant))
        #         if cosmomcAction != 0: break

    if not interactive:
        return batch
    print('Done... to run do: cobaya-grid-run %s' % batchPath)
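
From the lookups above (`defaults.pop("grid")`, `grid_definition["models"]`, `grid_definition["datasets"]`), the settings dictionary needs a top-level "grid" block, and everything outside it is merged into every job as defaults. A minimal sketch of such a settings dict, with the model and dataset names invented for illustration:

settings = {
    # Defaults merged into every job via merge_info(defaults, ...):
    "sampler": {"mcmc": {"covmat": "auto"}},
    "grid": {
        "models": {
            "base": {  # hypothetical model name (jobItem.param_set)
                "params": {"a": {"prior": {"min": 0, "max": 1}}}},
        },
        "datasets": {
            "dataset_1": {  # hypothetical dataset tag (jobItem.data_set.tag)
                "likelihood": {"gaussian_mixture": {"means": [0.2],
                                                    "covs": [0.1]}}},
        },
    },
}
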
Example #7
File: output.py Project: yufdu/cobaya
    def check_and_dump_info(self,
                            input_info,
                            updated_info,
                            check_compatible=True,
                            cache_old=False,
                            use_cache_old=False,
                            ignore_blocks=()):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the components' defaults.

        If resuming a sample, checks first that old and new infos and versions are
        consistent.
        """
        # trim known params of each likelihood: for internal use only
        updated_info_trimmed = deepcopy_where_possible(updated_info)
        updated_info_trimmed[_version] = __version__
        for like_info in updated_info_trimmed.get(kinds.likelihood,
                                                  {}).values():
            (like_info or {}).pop(_params, None)
        if check_compatible:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check whether python objects have changed
            old_info = self.reload_updated_info(cache=cache_old,
                                                use_cache=use_cache_old)
            if old_info:
                new_info = yaml_load(yaml_dump(updated_info_trimmed))
                if not is_equal_info(
                        old_info,
                        new_info,
                        strict=False,
                        ignore_blocks=list(ignore_blocks) + [_output_prefix]):
                    raise LoggedError(
                        self.log,
                        "Old and new run information not compatible! "
                        "Resuming not possible!")
                # Deal with version comparison separately:
                # - If not specified now, take the one used in the resume info
                # - If specified both now and before, check that the new one is
                #   not older than the old one
                # (for Cobaya's own version, always prefer the new one)
                old_version = old_info.get(_version, None)
                new_version = new_info.get(_version, None)
                if old_version and new_version:
                    if version.parse(old_version) > version.parse(new_version):
                        raise LoggedError(
                            self.log,
                            "You are trying to resume a run performed with a "
                            "newer version of Cobaya: %r (you are using %r). "
                            "Please, update your Cobaya installation.",
                            old_version, new_version)
                for k in (kind for kind in kinds if kind in updated_info):
                    if k in ignore_blocks:
                        continue
                    for c in updated_info[k]:
                        new_version = updated_info[k][c].get(_version)
                        old_version = old_info[k][c].get(_version)
                        if new_version is None:
                            updated_info[k][c][_version] = old_version
                            updated_info_trimmed[k][c][_version] = old_version
                        elif old_version is not None:
                            cls = get_class(c, k, None_if_not_found=True)
                            if cls and cls.compare_versions(
                                    old_version, new_version, equal=False):
                                raise LoggedError(
                                    self.log,
                                    "You have requested version %r for "
                                    "%s:%s, but you are trying to resume a "
                                    "run that used a newer version: %r.",
                                    new_version, k, c, old_version)
        # If resuming, we don't want to do *partial* dumps
        if ignore_blocks and self.is_resuming():
            return
        # Work on a copy of the input info, since we are updating the prefix
        # (the updated one is already a copy)
        if input_info is not None:
            input_info = deepcopy_where_possible(input_info)
        # Write the new one
        for f, info in [(self.file_input, input_info),
                        (self.file_updated, updated_info_trimmed)]:
            if info:
                for k in ignore_blocks:
                    info.pop(k, None)
                info.pop(_debug, None)
                info.pop(_force, None)
                info.pop(_resume, None)
                # make sure the dumped output_prefix only contains the file prefix,
                # not the folder, since it's already been placed inside it
                info[_output_prefix] = self.updated_prefix()
                with open(f, "w", encoding="utf-8") as f_out:
                    try:
                        f_out.write(yaml_dump(sort_cosmetic(info)))
                    except OutputError as e:
                        raise LoggedError(self.log, str(e))
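
Both versions of this method order versions with `version.parse`, which handles numeric release segments correctly where plain string comparison does not. A small sketch, assuming `version` here refers to `packaging.version`:

from packaging import version

old, new = "2.10", "2.9"
# Plain string comparison gets this wrong ("2.10" < "2.9" lexicographically);
# version.parse() orders release segments numerically, which is what the
# resume checks above rely on.
assert version.parse(old) > version.parse(new)
assert not (old > new)
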