Code example #1
    def dump_info(self, input_info, full_info):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the modules' defaults.

        If resuming a sample, checks first that old and new infos are consistent.
        """
        # trim known params of each likelihood: for internal use only
        full_info_trimmed = deepcopy(full_info)
        for lik_info in full_info_trimmed.get(_likelihood, {}).values():
            if hasattr(lik_info, "pop"):
                lik_info.pop(_params, None)
        try:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check if python objects are the same as before.
            old_info = yaml_load_file(self.file_full)
            new_info = yaml_load(yaml_dump(full_info_trimmed))
            if not is_equal_info(old_info, new_info, strict=False):
                self.log.error(
                    "Old and new sample information not compatible! "
                    "Resuming not possible!")
                raise HandledException
        except IOError:
            # There was no previous chain
            pass
        # We write the new one anyway (maybe updated debug, resuming...)
        for f, info in [(self.file_input, input_info),
                        (self.file_full, full_info_trimmed)]:
            with open(f, "w") as f_out:
                try:
                    f_out.write(yaml_dump(info))
                except OutputError as e:
                    self.log.error(e.message)
                    raise HandledException
Code example #2
def initiate_model(info_text):
    info = yaml_load(info_text)
    info['packages_path'] = '/home/moon/mniemeyer/cobaya_modules'
    model = get_model(info)
    point = dict(
        zip(model.parameterization.sampled_params(),
            model.prior.sample(ignore_external=True)[0]))
    return model, point
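A minimal usage sketch for the helper above, assuming a YAML input string in the style of the other examples on this page (the likelihood, prior and parameter names below are illustrative only, not part of the original snippet):

# Hypothetical input: the built-in "one" likelihood plus a single sampled
# parameter consumed by an external prior (mirrors code example #22).
info_text = """
likelihood:
  one:
prior:
  dummy: "lambda x: 0"
params:
  x:
    prior: {min: 0, max: 1}
    proposal: 0.05
"""
model, point = initiate_model(info_text)
# Evaluate the posterior at the randomly drawn starting point.
print(model.logposterior(point))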
Code example #3
File: input.py  Project: CobayaSampler/cobaya
def load_input_dict(info_or_yaml_or_file: Union[InputDict, str, os.PathLike]
                    ) -> InputDict:
    if isinstance(info_or_yaml_or_file, os.PathLike):
        return load_input_file(info_or_yaml_or_file)
    elif isinstance(info_or_yaml_or_file, str):
        if "\n" in info_or_yaml_or_file:
            return yaml_load(info_or_yaml_or_file)  # type: ignore
        else:
            return load_input_file(info_or_yaml_or_file)
    elif isinstance(info_or_yaml_or_file, (dict, Mapping)):
        return deepcopy_where_possible(info_or_yaml_or_file)
    else:
        raise ValueError("The first argument must be a dictionary, file name or "
                         "yaml string with the required input options.")
Code example #4
def body_of_test(modules, data, theory):
    assert modules, "I need a modules folder!"
    info = {_path_install: modules,
            _theory: {theory: None},
            _sampler: {"evaluate": None}}
    info[_likelihood] = {"des_1yr_"+data: None}
    info[_params] = {
        _theory: {
            "H0": 68.81,
            "ombh2": 0.0468 * 0.6881 ** 2,
            "omch2": (0.295 - 0.0468) * 0.6881 ** 2 - 0.0006155,
            "YHe": 0.245341,
            "tau": 0.08,
            "As": 2.260574e-09,
            "ns": 0.9676
        }}
    if data in ["shear", "galaxy_galaxylensing"]:
        info[_params].update(yaml_load(test_params_shear)[_params])
    if data in ["clustering", "galaxy_galaxylensing"]:
        info[_params].update(yaml_load(test_params_clustering)[_params])


    # UPDATE WITH BOTH ANYWAY FOR NOW!!!!!
    info[_params].update(yaml_load(test_params_shear)[_params])
    info[_params].update(yaml_load(test_params_clustering)[_params])

    reference_value = 650.872548
    abs_tolerance = 0.1
    if theory == "classy":
        info[_params][_theory].update(baseline_cosmology_classy_extra)
        abs_tolerance += 2
        print("WE SHOULD NOT HAVE TO LOWER THE TOLERANCE THAT MUCH!!!")
    updated_info, products = run(info)
    # print products["sample"]
    computed_value = products["sample"]["chi2__"+list(info[_likelihood].keys())[0]].values[0]
    assert abs(computed_value - reference_value) < abs_tolerance
Code example #5
def get_demo_lensing_model(theory):
    if theory == "camb":
        info_yaml = r"""
        likelihood:
            soliket.LensingLiteLikelihood:
                stop_at_error: True

        theory:
            camb:
                extra_args:
                    lens_potential_accuracy: 1

        params:
            ns:
                prior:
                  min: 0.8
                  max: 1.2
            H0:
                prior:
                  min: 40
                  max: 100        
        """
    elif theory == "classy":
        info_yaml = r"""
        likelihood:
            soliket.LensingLiteLikelihood:
                stop_at_error: True

        theory:
            classy:
                extra_args:
                    output: lCl, tCl
                path: global

        params:
            n_s:
                prior:
                  min: 0.8
                  max: 1.2
            H0:
                prior:
                  min: 40
                  max: 100        

        """

    info = yaml_load(info_yaml)
    model = get_model(info)
    return model
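A hedged usage sketch for the helper above: build the CAMB-based model and evaluate its log-likelihood at an arbitrary point inside the priors (requires soliket and camb to be installed; the parameter values are illustrative, not fiducial):

model = get_demo_lensing_model("camb")
# model.loglike returns (loglike, derived); take the first element as in code example #13.
loglike = model.loglike({"ns": 0.96, "H0": 70.0})[0]
print(loglike)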
Code example #6
File: output.py  Project: rancesol/cobaya
    def dump_info(self, input_info, updated_info, check_compatible=True):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the modules' defaults.

        If resuming a sample, checks first that old and new infos are consistent.
        """
        # trim known params of each likelihood: for internal use only
        updated_info_trimmed = deepcopy_where_possible(updated_info)
        for lik_info in updated_info_trimmed.get(_likelihood, {}).values():
            if hasattr(lik_info, "pop"):
                lik_info.pop(_params, None)
        if check_compatible:
            try:
                # We will test the old info against the dumped+loaded new info.
                # This is because we can't actually check if python objects do change
                old_info = self.reload_updated_info()
                new_info = yaml_load(yaml_dump(updated_info_trimmed))
                ignore_blocks = []
                if list(new_info.get(_sampler, [None]))[0] == "minimize":
                    ignore_blocks = [_sampler]
                if not is_equal_info(old_info, new_info, strict=False,
                                     ignore_blocks=ignore_blocks):
                    # HACK!!! NEEDS TO BE FIXED
                    if list(updated_info.get(_sampler, [None]))[0] == "minimize":
                        raise LoggedError(
                            self.log, "Old and new sample information not compatible! "
                            "At this moment it is not possible to 'force' deletion of "
                            "and old 'minimize' run. Please delete it by hand. "
                            "We are working on fixing this very soon!")
                    raise LoggedError(
                        self.log, "Old and new sample information not compatible! "
                        "Resuming not possible!")
            except IOError:
                # There was no previous chain
                pass
        # We write the new one anyway (maybe updated debug, resuming...)
        for f, info in [(self.file_input, input_info),
                        (self.file_updated, updated_info_trimmed)]:
            if not info:
                continue  # nothing to write
            with open(f, "w") as f_out:
                try:
                    f_out.write(yaml_dump(info))
                except OutputError as e:
                    raise LoggedError(self.log, str(e))
Code example #7
def test_mcmc_sync():
    info: InputDict = yaml_load(yaml)
    logger.info('Test end synchronization')

    if mpi.rank() == 1:
        max_samples = 200
    else:
        max_samples = 600
    # simulate asynchronous ending sampling loop
    info['sampler']['mcmc'] = {'max_samples': max_samples}

    updated_info, sampler = run(info)
    assert len(sampler.products()["sample"]) == max_samples

    logger.info('Test error synchronization')
    if mpi.rank() == 0:
        info['sampler']['mcmc'] = {'max_samples': 'none'}  # 'none' not valid
        with NoLogging(logging.ERROR), pytest.raises(TypeError):
            run(info)
    else:
        with pytest.raises(mpi.OtherProcessError):
            run(info)

    logger.info('Test one-process hang abort')

    aborted = False

    def test_abort():
        nonlocal aborted
        aborted = True

    # test error converted into MPI_ABORT after timeout
    # noinspection PyTypeChecker
    with pytest.raises(
        (LoggedError, mpi.OtherProcessError)), NoLogging(logging.ERROR):
        with mpi.ProcessState('test',
                              time_out_seconds=0.5,
                              timeout_abort_proc=test_abort):
            if mpi.rank() != 1:
                time.sleep(0.6)  # fake hang
            else:
                raise LoggedError(logger, 'Expected test error')
    if mpi.rank() == 1:
        assert aborted
Code example #8
def test_cosmo_docs_basic():
    flag = True
    for theo in ["camb", "classy"]:
        info_new = create_input(preset=preset_pre + theo)
        info_yaml_new = yaml_dump(info_new)
        file_path = os.path.join(path, file_pre + theo + ".yaml")
        with open(file_path) as docs_file:
            info_yaml_docs = "".join(docs_file.readlines())
        info_docs = yaml_load(info_yaml_docs)
        if not is_equal_info(
                info_new, info_docs, strict=True, print_not_log=True):
            with open(file_path, "w") as docs_file:
                docs_file.write(info_yaml_new)
            flag = False
            print("OLD:\n%s" % info_yaml_docs)
            print("----------------------------------------")
            print("NEW:\n%s" % info_yaml_new)
    assert flag, ("Differences in example input file. "
                  "Files have been re-generated; check out your git diff.")
Code example #9
 def __init__(self, kind, component, parent=None):
     super().__init__()
     self.clipboard = parent.clipboard
     self.setWindowTitle("%s : %s" % (kind, component))
     self.setGeometry(0, 0, 500, 500)
     # noinspection PyArgumentList
     self.move(QApplication.desktop().screenGeometry().center() -
               self.rect().center())
     self.show()
     # Main layout
     self.layout = QVBoxLayout()
     self.setLayout(self.layout)
     self.display_tabs = QTabWidget()
     self.display = {}
     for k in ["yaml", "python", "bibliography"]:
         self.display[k] = QTextEdit()
         self.display[k].setLineWrapMode(QTextEdit.NoWrap)
         self.display[k].setFontFamily("mono")
         self.display[k].setCursorWidth(0)
         self.display[k].setReadOnly(True)
         self.display_tabs.addTab(self.display[k], k)
     self.layout.addWidget(self.display_tabs)
     # Fill text
     defaults_txt = get_default_info(component, kind, return_yaml=True)
     _indent = "  "
     defaults_txt = (kind + ":\n" + _indent + component + ":\n" +
                     2 * _indent +
                     ("\n" + 2 * _indent).join(defaults_txt.split("\n")))
     from cobaya.yaml import yaml_load
     self.display["python"].setText(pformat(yaml_load(defaults_txt)))
     self.display["yaml"].setText(defaults_txt)
     self.display["bibliography"].setText(get_bib_component(
         component, kind))
     # Buttons
     self.buttons = QHBoxLayout()
     self.close_button = QPushButton('Close', self)
     self.copy_button = QPushButton('Copy to clipboard', self)
     self.buttons.addWidget(self.close_button)
     self.buttons.addWidget(self.copy_button)
     self.close_button.released.connect(self.close)
     self.copy_button.released.connect(self.copy_clipb)
     self.layout.addLayout(self.buttons)
Code example #10
def test_mcmc_drag_results():
    info: InputDict = yaml_load(yaml_drag)
    info['likelihood'] = {
        'g1': {
            'external': GaussLike
        },
        'g2': {
            'external': GaussLike2
        }
    }
    updated_info, sampler = run(info)
    products = sampler.products()
    from getdist.mcsamples import MCSamplesFromCobaya
    products["sample"] = mpi.allgather(products["sample"])
    gdample = MCSamplesFromCobaya(updated_info,
                                  products["sample"],
                                  ignore_rows=0.2)
    assert abs(gdample.mean('a') - 0.2) < 0.03
    assert abs(gdample.mean('b')) < 0.03
    assert abs(gdample.std('a') - 0.293) < 0.03
    assert abs(gdample.std('b') - 0.4) < 0.03
Code example #11
 def test_cobaya(self):
     from cobaya.yaml import yaml_load
     from cobaya.model import get_model
     last_bib = None
     for name in [
             'test_package.TestLike', 'test_package.test_like.TestLike',
             'test_package.sub_module.test_like2.TestLike2',
             'test_package.sub_module.test_like2'
     ]:
         info_yaml = r"""
         likelihood:
           %s:
         params:
           H0: 72
         """ % name
         info = yaml_load(info_yaml)
         model = get_model(info)
         self.assertAlmostEqual(-2 * model.loglikes({})[0][0], 3.614504, 4)
         bib = model.likelihood[name].get_bibtex()
         self.assertTrue('Lewis' in bib if last_bib is None else bib ==
                         last_bib)
Code example #12
File: gui.py  Project: rancesol/cobaya
 def __init__(self, kind, module, parent=None):
     super(DefaultsDialog, self).__init__()
     self.clipboard = parent.clipboard
     self.setWindowTitle("%s : %s" % (kind, module))
     self.setGeometry(0, 0, 500, 500)
     self.move(
         QApplication.desktop().screenGeometry().center() - self.rect().center())
     self.show()
     # Main layout
     self.layout = QVBoxLayout()
     self.setLayout(self.layout)
     self.display_tabs = QTabWidget()
     self.display = {}
     for k in ["yaml", "python", "bibliography"]:
         self.display[k] = QTextEdit()
         self.display[k].setLineWrapMode(QTextEdit.NoWrap)
         self.display[k].setFontFamily("mono")
         self.display[k].setCursorWidth(0)
         self.display[k].setReadOnly(True)
         self.display_tabs.addTab(self.display[k], k)
     self.layout.addWidget(self.display_tabs)
     # Fill text
     defaults_txt = get_default_info(
         module, kind, return_yaml=True, fail_if_not_found=True)
     from cobaya.yaml import yaml_load
     self.display["python"].setText(
         "from collections import OrderedDict\n\ninfo = " +
         pformat(yaml_load(defaults_txt)))
     self.display["yaml"].setText(defaults_txt)
     self.display["bibliography"].setText(get_bib_module(module, kind))
     # Buttons
     self.buttons = QHBoxLayout()
     self.close_button = QPushButton('Close', self)
     self.copy_button = QPushButton('Copy to clipboard', self)
     self.buttons.addWidget(self.close_button)
     self.buttons.addWidget(self.copy_button)
     self.close_button.released.connect(self.close)
     self.copy_button.released.connect(self.copy_clipb)
     self.layout.addLayout(self.buttons)
Code example #13
def test_cobaya():
    """Test the Cobaya interface to the ACT likelihood."""
    from cobaya.yaml import yaml_load
    from cobaya.model import get_model

    info_yaml = r"""
        likelihood:
            pyactlike.ACTPol_lite_DR4:
                components: 
                    - tt
                    - te
                    - ee
                lmax: 6000

        theory:
            camb:
                extra_args:
                    lens_potential_accuracy: 1

        params:
            ns:
                prior:
                  min: 0.8
                  max: 1.2
            H0:
                prior:
                  min: 40
                  max: 100       
            yp2:
                prior:
                    min: 0.5
                    max: 1.5       
        """
    info = yaml_load(info_yaml)
    model = get_model(info)
    assert np.isfinite(model.loglike({"ns": 1.0, "H0": 70, "yp2": 1.0})[0])
Code example #14
 yaml_load("""
    # Fixed to number
    a: 0.01
    # Fixed to function, non-explicitly requested as derived
    b: "%s"
    # Fixed to function, explicitly requested as derived
    c:
      value: "%s"
      derived: True
    # Sampled, dynamically defined from functions
    bprime:
      prior:
        min: -1
        max:  1
      drop: True
      proposal: 0.0001
    cprime:
      prior:
        min: -1
        max:  1
      drop: True
      proposal: 0.0001
    # Simple sampled parameter
    d:
      prior:
        min: -1
        max:  1
      proposal: 0.0001
    # Simple derived parameter
    e:
    # Dynamical derived parameter
    f:
      derived: "%s"
    # Dynamical derived parameter, needing non-mentioned output parameter (x)
    g:
      derived: "%s"
 """ % (b_func, c_func, f_func, g_func))
Code example #15
File: test_runs.py  Project: paganol/SOLikeT
def test_evaluate(lhood):
    info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml"))
    info["force"] = True
    info['sampler'] = {'evaluate': {}}

    updated_info, sampler = run(info)
Code example #16
File: test_runs.py  Project: paganol/SOLikeT
def test_mcmc(lhood):
    info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml"))
    info["force"] = True
    info['sampler'] = {'mcmc': {'max_samples': 10}}

    updated_info, sampler = run(info)
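The lhood argument in the two tests above is presumably supplied by pytest parametrization; a hypothetical version (the likelihood names below are placeholders, not SOLikeT's actual list) would be:

import pytest

@pytest.mark.parametrize("lhood", ["lensing", "mflike"])
def test_mcmc(lhood):
    ...  # body as in the example above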
Code example #17
 yaml_load("""
    # Fixed to number
    a: 0.01
    # Fixed to function, non-explicitly requested as derived
    b: "%s"
    # Fixed to function, explicitly requested as derived
    c:
      value: "%s"
      derived: True
    # Sampled, dynamically defined from functions
    bprime:
      prior:
        min: -1
        max:  1
      drop: True
      proposal: 0.0001
    cprime:
      prior:
        min: -1
        max:  1
      drop: True
      proposal: 0.0001
    # Simple sampled parameter
    d:
      prior:
        min: -1
        max:  1
      proposal: 0.0001
    # Simple derived parameter
    e:
    # Dynamical derived parameter
    f:
      derived: "%s"
    # Dynamical derived parameter, needing non-mentioned output parameter (x)
    g:
      derived: "%s"
    # Fixing parameter whose only role is being an argument for a different one
    h: "%s"
    i: 2
    # Multi-layer: input parameter of "2nd order", i.e. dep on another dyn input
    j: "%s"
    # Multi-layer derived parameter of "2nd order", i.e. dep. on another dyn derived
    k:
      derived: "%s"
 """ % (b_func, c_func, f_func, g_func, h_func, j_func, k_func))
Code example #18
File: output.py  Project: williamjameshandley/cobaya
    def check_and_dump_info(self, input_info, updated_info, check_compatible=True,
                            cache_old=False, use_cache_old=False, ignore_blocks=()):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the components' defaults.

        If resuming a sample, checks first that old and new infos and versions are
        consistent.
        """
        # trim known params of each likelihood: for internal use only
        self.check_lock()
        updated_info_trimmed = deepcopy_where_possible(updated_info)
        updated_info_trimmed["version"] = get_version()
        for like_info in updated_info_trimmed.get("likelihood", {}).values():
            (like_info or {}).pop("params", None)
        if check_compatible:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check if python objects do change
            try:
                old_info = self.reload_updated_info(cache=cache_old,
                                                    use_cache=use_cache_old)
            except InputImportError:
                # for example, when there's a dynamically generated class that cannot
                # be found by the yaml loader (could use yaml loader that ignores them)
                old_info = None
            if old_info:
                # use consistent yaml read-in types
                # TODO: could probably just compare full infos here, with externals?
                #  for the moment cautiously keeping old behaviour
                old_info = yaml_load(yaml_dump(old_info))  # type: ignore
                new_info = yaml_load(yaml_dump(updated_info_trimmed))
                if not is_equal_info(old_info, new_info, strict=False,
                                     ignore_blocks=list(ignore_blocks) + [
                                         "output"]):
                    raise LoggedError(
                        self.log, "Old and new run information not compatible! "
                                  "Resuming not possible!")
                # Deal with version comparison separately:
                # - If not specified now, take the one used in resume info
                # - If specified both now and before, check new older than old one
                # (For Cobaya's own version, prefer new one always)
                old_version = old_info.get("version")
                new_version = new_info.get("version")
                if isinstance(old_version, str) and isinstance(new_version, str):
                    if version.parse(old_version) > version.parse(new_version):
                        raise LoggedError(
                            self.log, "You are trying to resume a run performed with a "
                                      "newer version of Cobaya: %r (you are using %r). "
                                      "Please, update your Cobaya installation.",
                            old_version, new_version)
                for k in set(kinds).intersection(updated_info):
                    if k in ignore_blocks or updated_info[k] is None:
                        continue
                    for c in updated_info[k]:
                        new_version = updated_info[k][c].get("version")
                        old_version = old_info[k][c].get("version")  # type: ignore
                        if new_version is None:
                            updated_info[k][c]["version"] = old_version
                            updated_info_trimmed[k][c]["version"] = old_version
                        elif old_version is not None:
                            cls = get_resolved_class(
                                c, k, None_if_not_found=True,
                                class_name=updated_info[k][c].get("class"))
                            if cls and cls.compare_versions(
                                    old_version, new_version, equal=False):
                                raise LoggedError(
                                    self.log, "You have requested version %r for "
                                              "%s:%s, but you are trying to resume a "
                                              "run that used a newer version: %r.",
                                    new_version, k, c, old_version)
        # If resuming, we don't want to do *partial* dumps
        if ignore_blocks and self.is_resuming():
            return
        # Work on a copy of the input info, since we are updating the prefix
        # (the updated one is already a copy)
        if input_info is not None:
            input_info = deepcopy_where_possible(input_info)
        # Write the new one
        for f, info in [(self.file_input, input_info),
                        (self.file_updated, updated_info_trimmed)]:
            if info:
                for k in ignore_blocks:
                    info.pop(k, None)
                info.pop("debug", None)
                info.pop("force", None)
                info.pop("resume", None)
                # make sure the dumped output_prefix does only contain the file prefix,
                # not the folder, since it's already been placed inside it
                info["output"] = self.updated_prefix()
                with open(f, "w", encoding="utf-8") as f_out:
                    try:
                        f_out.write(yaml_dump(sort_cosmetic(info)))
                    except OutputError as e:
                        raise LoggedError(self.log, str(e))
        if updated_info_trimmed and has_non_yaml_reproducible(updated_info_trimmed):
            try:
                import dill
            except ImportError:
                self.mpi_info('Install "dill" to save reproducible options file.')
            else:
                import pickle
                try:
                    with open(self.dump_file_updated, 'wb') as f:
                        dill.dump(sort_cosmetic(updated_info_trimmed), f,
                                  pickle.HIGHEST_PROTOCOL)
                except pickle.PicklingError as e:
                    os.remove(self.dump_file_updated)
                    self.mpi_info('Options file cannot be pickled %s', e)
Code example #19
File: output.py  Project: yufdu/cobaya
    def check_and_dump_info(self,
                            input_info,
                            updated_info,
                            check_compatible=True,
                            cache_old=False,
                            use_cache_old=False,
                            ignore_blocks=()):
        """
        Saves the info in the chain folder twice:
           - the input info.
           - idem, populated with the components' defaults.

        If resuming a sample, checks first that old and new infos and versions are
        consistent.
        """
        # trim known params of each likelihood: for internal use only
        updated_info_trimmed = deepcopy_where_possible(updated_info)
        updated_info_trimmed[_version] = __version__
        for like_info in updated_info_trimmed.get(kinds.likelihood,
                                                  {}).values():
            (like_info or {}).pop(_params, None)
        if check_compatible:
            # We will test the old info against the dumped+loaded new info.
            # This is because we can't actually check if python objects do change
            old_info = self.reload_updated_info(cache=cache_old,
                                                use_cache=use_cache_old)
            if old_info:
                new_info = yaml_load(yaml_dump(updated_info_trimmed))
                if not is_equal_info(
                        old_info,
                        new_info,
                        strict=False,
                        ignore_blocks=list(ignore_blocks) + [_output_prefix]):
                    raise LoggedError(
                        self.log,
                        "Old and new run information not compatible! "
                        "Resuming not possible!")
                # Deal with version comparison separately:
                # - If not specified now, take the one used in resume info
                # - If specified both now and before, check new older than old one
                # (For Cobaya's own version, prefer new one always)
                old_version = old_info.get(_version, None)
                new_version = new_info.get(_version, None)
                if old_version:
                    if version.parse(old_version) > version.parse(new_version):
                        raise LoggedError(
                            self.log,
                            "You are trying to resume a run performed with a "
                            "newer version of Cobaya: %r (you are using %r). "
                            "Please, update your Cobaya installation.",
                            old_version, new_version)
                for k in (kind for kind in kinds if kind in updated_info):
                    if k in ignore_blocks:
                        continue
                    for c in updated_info[k]:
                        new_version = updated_info[k][c].get(_version)
                        old_version = old_info[k][c].get(_version)
                        if new_version is None:
                            updated_info[k][c][_version] = old_version
                            updated_info_trimmed[k][c][_version] = old_version
                        elif old_version is not None:
                            cls = get_class(c, k, None_if_not_found=True)
                            if cls and cls.compare_versions(
                                    old_version, new_version, equal=False):
                                raise LoggedError(
                                    self.log,
                                    "You have requested version %r for "
                                    "%s:%s, but you are trying to resume a "
                                    "run that used a newer version: %r.",
                                    new_version, k, c, old_version)
        # If resuming, we don't want to do *partial* dumps
        if ignore_blocks and self.is_resuming():
            return
        # Work on a copy of the input info, since we are updating the prefix
        # (the updated one is already a copy)
        if input_info is not None:
            input_info = deepcopy_where_possible(input_info)
        # Write the new one
        for f, info in [(self.file_input, input_info),
                        (self.file_updated, updated_info_trimmed)]:
            if info:
                for k in ignore_blocks:
                    info.pop(k, None)
                info.pop(_debug, None)
                info.pop(_force, None)
                info.pop(_resume, None)
                # make sure the dumped output_prefix does only contain the file prefix,
                # not the folder, since it's already been placed inside it
                info[_output_prefix] = self.updated_prefix()
                with open(f, "w", encoding="utf-8") as f_out:
                    try:
                        f_out.write(yaml_dump(sort_cosmetic(info)))
                    except OutputError as e:
                        raise LoggedError(self.log, str(e))
Code example #20
def test_sdss_dr12_consensus_final_camb(modules):
    lik = "sdss_dr12_consensus_final"
    info_likelihood = {lik: {}}
    info_theory = {"camb": {"use_planck_names": True}}
    body_of_test(modules, best_fit_base, info_likelihood, info_theory,
                 chi2_sdss_dr12_consensus_final)


def test_sdss_dr12_consensus_final_classy(modules):
    lik = "sdss_dr12_consensus_final"
    info_likelihood = {lik: {}}
    info_theory = {"classy": {"use_planck_names": True}}
    chi2_classy = deepcopy(chi2_sdss_dr12_consensus_final)
    chi2_classy["tolerance"] += 1.2
    body_of_test(modules, best_fit_base, info_likelihood, info_theory,
                 chi2_classy)


# BEST FIT AND REFERENCE VALUES ##########################################################

best_fit_base = yaml_load(baseline_cosmology)
best_fit_base.update({k:v for k,v in params_lowTEB_highTTTEEE.items()
                      if k in baseline_cosmology or k == "H0"})

chi2_sdss_dr12_consensus_bao = {
    "sdss_dr12_consensus_bao": 40.868, "tolerance": 0.04}
chi2_sdss_dr12_consensus_full_shape = {
    "sdss_dr12_consensus_full_shape": 28.782, "tolerance": 0.02}
chi2_sdss_dr12_consensus_final = {
    "sdss_dr12_consensus_final": 26.150, "tolerance": 0.02}
Code example #21
File: test_lensing.py  Project: simonsobs/SOLikeT
def get_demo_lensing_model(theory):
    if theory == "camb":
        info_yaml = r"""
        likelihood:
            soliket.LensingLikelihood:
                stop_at_error: True

        theory:
            camb:
                extra_args:
                    lens_potential_accuracy: 1

        params:
            ns:
                prior:
                  min: 0.8
                  max: 1.2
            H0:
                prior:
                  min: 40
                  max: 100        
        """
    elif theory == "classy":
        info_yaml = r"""
        likelihood:
            soliket.LensingLikelihood:
                stop_at_error: True

        theory:
            classy:
                extra_args:
                    output: lCl, tCl
                path: global

        params:
            n_s:
                prior:
                  min: 0.8
                  max: 1.2
            H0:
                prior:
                  min: 40
                  max: 100        

        """

    info = yaml_load(info_yaml)

    from cobaya.install import install
    install(info, path=packages_path, skip_global=True)

    test_point = {}
    for par, pdict in info["params"].items():
        if not isinstance(pdict, dict):
            continue

        if "ref" in pdict:
            try:
                value = float(pdict["ref"])
            except TypeError:
                value = (pdict["ref"]["min"] + pdict["ref"]["max"]) / 2
            test_point[par] = value
        elif "prior" in pdict:
            value = (pdict["prior"]["min"] + pdict["prior"]["max"]) / 2
            test_point[par] = value

    model = get_model(info)
    return model, test_point
Code example #22
def body_of_test(info_logpdf, kind, tmpdir, derived=False, manual=False):
    # For pytest's handling of tmp dirs
    if hasattr(tmpdir, "dirpath"):
        tmpdir = tmpdir.dirname
    prefix = os.path.join(tmpdir, "%d" % round(1e8 * random())) + os.sep
    if os.path.exists(prefix):
        shutil.rmtree(prefix)
    # build updated info
    info = {
        _output_prefix: prefix,
        _params: {
            "x": {
                _prior: {
                    "min": 0,
                    "max": 1
                },
                "proposal": 0.05
            },
            "y": {
                _prior: {
                    "min": -1,
                    "max": 1
                },
                "proposal": 0.05
            }
        },
        kinds.sampler: {
            "mcmc": {
                "max_samples": (10 if not manual else 5000),
                "learn_proposal": False
            }
        }
    }
    if derived:
        info[_params].update({
            "r": {
                "min": 0,
                "max": 1
            },
            "theta": {
                "min": -0.5,
                "max": 0.5
            }
        })
    # Complete according to kind
    if kind == _prior:
        info.update({_prior: info_logpdf, kinds.likelihood: {"one": None}})
    elif kind == kinds.likelihood:
        info.update({kinds.likelihood: info_logpdf})
    else:
        raise ValueError("Kind of test not known.")
    # If there is an ext function that is not a string, don't write output!
    stringy = {k: v for k, v in info_logpdf.items() if isinstance(v, str)}
    if stringy != info_logpdf:
        info.pop(_output_prefix)
    # Run
    updated_info, sampler = run(info)
    products = sampler.products()
    # Test values
    logprior_base = -np.log(
        (info[_params]["x"][_prior]["max"] - info[_params]["x"][_prior]["min"])
        * (info[_params]["y"][_prior]["max"] -
           info[_params]["y"][_prior]["min"]))
    logps = {
        name: logpdf(
            **{
                arg: products["sample"][arg].values
                for arg in getfullargspec(logpdf)[0]
            })
        for name, logpdf in {
            "half_ring": half_ring_func,
            "gaussian_y": gaussian_func
        }.items()
    }
    # Test #1: values of logpdf's
    if kind == _prior:
        columns_priors = [
            c for c in products["sample"].data.columns
            if c.startswith("minuslogprior")
        ]
        assert np.allclose(
            products["sample"][columns_priors[0]].values,
            np.sum(products["sample"][columns_priors[1:]].values, axis=-1)), (
                "The single prior values do not add up to the total one.")
        assert np.allclose(
            logprior_base + sum(logps[p] for p in info_logpdf),
            -products["sample"]["minuslogprior"].values), (
                "The value of the total prior is not reproduced correctly.")
    elif kind == kinds.likelihood:
        for lik in info[kinds.likelihood]:
            assert np.allclose(
                -2 * logps[lik], products["sample"][_get_chi2_name(lik)].values
            ), ("The value of the likelihood '%s' is not reproduced correctly."
                % lik)
    assert np.allclose(
        logprior_base + sum(logps[p] for p in info_logpdf),
        -products["sample"]["minuslogpost"].values), (
            "The value of the posterior is not reproduced correctly.")
    # Test derived parameters, if present -- for now just for "r"
    if derived:
        derived_values = {
            param:
            func(**{arg: products["sample"][arg].values
                    for arg in ["x", "y"]})
            for param, func in derived_funcs.items()
        }
        assert all(
            np.allclose(v, products["sample"][p].values)
            for p, v in derived_values.items()
        ), ("The value of the derived parameters is not reproduced correctly.")
    # Test updated info -- scripted
    if kind == _prior:
        assert info[_prior] == updated_info[_prior], (
            "The prior information has not been updated correctly.")
    elif kind == kinds.likelihood:
        # Transform the likelihood info to the "external" convention and add defaults
        info_likelihood = deepcopy(info[kinds.likelihood])
        for lik, value in list(info_likelihood.items()):
            if not hasattr(value, "get"):
                info_likelihood[lik] = {_external: value}
            info_likelihood[lik].update({
                k: v
                for k, v in Likelihood.get_defaults().items()
                if k not in info_likelihood[lik]
            })
            for k in [_input_params, _output_params]:
                info_likelihood[lik].pop(k, None)
                updated_info[kinds.likelihood][lik].pop(k)
        assert info_likelihood == updated_info[kinds.likelihood], (
            "The likelihood information has not been updated correctly\n %r vs %r"
            % (info_likelihood, updated_info[kinds.likelihood]))
    # Test updated info -- yaml
    # For now, only if ALL external pdfs are given as strings,
    # since the YAML load fails otherwise
    if stringy == info_logpdf:
        updated_output_file = os.path.join(prefix, _updated_suffix + ".yaml")
        with open(updated_output_file) as updated:
            updated_yaml = yaml_load("".join(updated.readlines()))
        for k, v in stringy.items():
            to_test = updated_yaml[kind][k]
            if kind == kinds.likelihood:
                to_test = to_test[_external]
            assert to_test == info_logpdf[k], (
                "The updated external pdf info has not been written correctly."
            )
Code example #23
File: 1.py  Project: cmbant/cobaya-test
    prior: {min: 40, max: 100}
    ref: {dist: norm, loc: 70, scale: 2}
    proposal: 2
    latex: H_0
  omega_b:
    prior: {min: 0.005, max: 0.1}
    ref: {dist: norm, loc: 0.0221, scale: 0.0001}
    proposal: 0.0001
    latex: \Omega_\mathrm{b} h^2
  omega_cdm:
    prior: {min: 0.001, max: 0.99}
    ref: {dist: norm, loc: 0.12, scale: 0.001}
    proposal: 0.0005
    latex: \Omega_\mathrm{c} h^2
  m_ncdm: {renames: mnu, value: 0.06}
  Omega_Lambda: {latex: \Omega_\Lambda}
  YHe: {latex: 'Y_\mathrm{P}'}
  tau_reio:
    prior: {min: 0.01, max: 0.8}
    ref: {dist: norm, loc: 0.06, scale: 0.01}
    proposal: 0.005
    latex: \tau_\mathrm{reio}
"""

from cobaya.yaml import yaml_load

info = yaml_load(info_txt)

# Add your external packages installation folder
info['packages_path'] = '/path/to/packages'
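A hedged continuation of the snippet above, following the pattern of the other examples on this page: once packages_path is set, the info dict can be turned into a model (any external packages required by the components declared in the truncated YAML above must be installed under that path):

from cobaya.model import get_model

model = get_model(info)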
Code example #24
def body_of_test(modules,
                 best_fit,
                 info_likelihood,
                 info_theory,
                 ref_chi2,
                 best_fit_derived=None):
    assert modules, "I need a modules folder!"
    info = {
        _path_install: modules,
        _theory: info_theory,
        _likelihood: info_likelihood,
        _sampler: {
            "evaluate": None
        }
    }
    # Add best fit
    info[_params] = merge_params_info(*([yaml_load(baseline_cosmology)] + [
        get_default_info(lik, _likelihood).get(_params, {})
        for lik in info[_likelihood]
    ]))
    for p in best_fit:
        try:
            if _prior in info[_params].get(p, {}):
                info[_params][p]["ref"] = best_fit[p]
        except:  # was fixed or derived
            info[_params][p] = best_fit[p]
    # We'll pop some derived parameters, so copy
    derived = deepcopy(baseline_cosmology_derived)
    best_fit_derived = deepcopy(best_fit_derived)
    if list(info[_theory].keys())[0] == "classy":
        # More stuff that CLASS needs for the Planck model
        info[_params].update(baseline_cosmology_classy_extra)
        # Remove "cosmomc_theta" in favour of "H0" (remove it from derived then!)
        info[_params].pop("cosmomc_theta")
        info[_params].pop("theta")
        info[_params]["H0"] = {
            _prior: {
                "min": 0,
                "max": 100
            },
            _p_ref: best_fit["H0"]
        }
        derived.pop("H0")
        if best_fit_derived is not None:
            best_fit_derived.pop("H0", None)
            # Don't test those that have not been implemented yet
            for p in [
                    "zstar",
                    "rstar",
                    "thetastar",
                    "DAstar",
                    "zdrag",
                    "kd",
                    "thetad",
                    "zeq",
                    "keq",
                    "thetaeq",
                    "thetarseq",
                    # BBN!!!
                    "DH",
                    "Y_p"
            ]:
                derived.pop(p)
                best_fit_derived.pop(p, None)
    else:
        info[_theory]["camb"] = info[_theory]["camb"] or {}
        info[_theory]["camb"].update({"stop_at_error": True})
    # Add derived
    if best_fit_derived is not None:
        info[_params].update(derived)
    updated_info, products = run(info)
    # Check value of likelihoods
    for lik in info[_likelihood]:
        chi2 = products["sample"][_chi2 + separator + lik][0]
        assert abs(chi2 - ref_chi2[lik]) < ref_chi2["tolerance"], (
            "Testing likelihood '%s': | %g - %g | = %g >= %g" %
            (lik, chi2, ref_chi2[lik], abs(chi2 - ref_chi2[lik]),
             ref_chi2["tolerance"]))
    # Check value of derived parameters
    not_tested = []
    not_passed = []
    for p in best_fit_derived or {}:
        if best_fit_derived[p][0] is None or p not in best_fit_derived:
            not_tested += [p]
            continue
        rel = (abs(products["sample"][_derived_pre + p][0] -
                   best_fit_derived[p][0]) / best_fit_derived[p][1])
        if rel > tolerance_derived * (2 if p in ("YHe", "Y_p", "DH", "sigma8",
                                                 "s8omegamp5") else 1):
            not_passed += [(p, rel)]
    print("Derived parameters not tested because not implemented: %r" %
          not_tested)
    assert not not_passed, "Some derived parameters were off: %r" % not_passed
Code example #25
File: test_xcorr.py  Project: Pablo-Lemos/SOLikeT
def get_demo_xcorr_model(theory):
    if theory == "camb":
        info_yaml = r"""
        likelihood:
            soliket.XcorrLikelihood:
                stop_at_error: True
                datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
                k_tracer_name: ck_so
                gc_tracer_name: gc_unwise

        theory:
            camb:
                extra_args:
                  lens_potential_accuracy: 1

        params:
            tau: 0.05
            mnu: 0.0
            nnu: 3.046
            b1:
                prior:
                    min: 0.
                    max: 10.
                ref:
                    min: 1.
                    max: 4.
                proposal: 0.1
            s1:
                prior:
                    min: 0.1
                    max: 1.0
                proposal: 0.1
        """
    elif theory == "classy":
        info_yaml = r"""
        likelihood:
            soliket.XcorrLikelihood:
                stop_at_error: True
                datapath: soliket/tests/data/unwise_g-so_kappa.sim.sacc.fits
                k_tracer_name: ck_so
                gc_tracer_name: gc_unwise

        theory:
            classy:
                extra_args:
                    output: lCl, tCl
                path: global

        params:
            b1:
                prior:
                    min: 0.
                    max: 10.
                ref:
                    min: 1.
                    max: 4.
                proposal: 0.1
            s1:
                prior:
                    min: 0.1
                    max: 1.0
                proposal: 0.1

        """

    info = yaml_load(info_yaml)
    model = get_model(info)
    return model
Code example #26
    _likelihood: {
        "test_lik": loglik
    },
    _sampler: {
        "evaluate": None
    },
    _params:
    yaml_load("""
       a: 0.01
       b: "%s"
       bprime:
         prior:
           min: -1
           max:  1
         drop: True
       c:
         prior:
           min: -1
           max:  1
       d:
       e:
         derived: "%s"
       f:
         derived: "%s"
    """ % (b_func, e_func, f_func))
}


def test_parametrization():
    updated_info, products = run(info)
    sample = products["sample"]
    for i, point in sample:
Code example #27
def body_of_test(info_logpdf, kind, tmpdir, derived=False, manual=False):
    # For pytest's handling of tmp dirs
    if hasattr(tmpdir, "dirpath"):
        tmpdir = tmpdir.dirname
    prefix = os.path.join(tmpdir, "%d" % round(1e8 * random())) + os.sep
    if os.path.exists(prefix):
        shutil.rmtree(prefix)
    # build full info
    info = {
        _output_prefix: prefix,
        _params: {
            "x": {
                _prior: {
                    "min": 0,
                    "max": 1
                },
                "proposal": 0.05
            },
            "y": {
                _prior: {
                    "min": -1,
                    "max": 1
                },
                "proposal": 0.05
            }
        },
        _sampler: {
            "mcmc": {
                "max_samples": (10 if not manual else 5000),
                "learn_proposal": False
            }
        }
    }
    if derived:
        info[_params].update({
            "r": {
                "min": 0,
                "max": 1
            },
            "theta": {
                "min": -0.5,
                "max": 0.5
            }
        })
    # Complete according to kind
    if kind == _prior:
        info.update({
            _prior: info_logpdf,
            _likelihood: {
                "one": {
                    "prefix": ""
                }
            }
        })
    elif kind == _likelihood:
        info.update({_likelihood: info_logpdf})
    else:
        raise ValueError("Kind of test not known.")
    # If there is an ext function that is not a string, don't write output!
    stringy = dict([(k, v) for k, v in info_logpdf.items()
                    if isinstance(v, six.string_types)])
    if stringy != info_logpdf:
        info.pop(_output_prefix)
    # Run
    updated_info, products = run(info)
    # Test values
    logprior_base = -np.log(
        (info[_params]["x"][_prior]["max"] - info[_params]["x"][_prior]["min"])
        * (info[_params]["y"][_prior]["max"] -
           info[_params]["y"][_prior]["min"]))
    logps = dict([(name,
                   logpdf(**dict([(arg, products["sample"][arg].values)
                                  for arg in inspect.getargspec(logpdf)[0]])))
                  for name, logpdf in {
                      "half_ring": half_ring_func,
                      "gaussian_y": gaussian_func
                  }.items()])
    # Test #1: values of logpdf's
    if kind == _prior:
        assert np.allclose(
            logprior_base + sum(logps[p] for p in info_logpdf),
            -products["sample"]["minuslogprior"].values), (
                "The value of the prior is not reproduced correctly.")
    elif kind == _likelihood:
        for lik in info[_likelihood]:
            assert np.allclose(
                -2 * logps[lik],
                products["sample"][_chi2 + separator + lik].values
            ), ("The value of the likelihood '%s' is not reproduced correctly."
                % lik)
    assert np.allclose(
        logprior_base + sum(logps[p] for p in info_logpdf),
        -products["sample"]["minuslogpost"].values), (
            "The value of the posterior is not reproduced correctly.")
    # Test derived parameters, if present -- for now just for "r"
    if derived:
        derived_values = dict([(param,
                                func(**dict([(arg,
                                              products["sample"][arg].values)
                                             for arg in ["x", "y"]])))
                               for param, func in derived_funcs.items()])
        assert np.all([
            np.allclose(v, products["sample"]["derived__" + p].values)
            for p, v in derived_values.items()
        ]), (
            "The value of the derived parameters is not reproduced correctly.")
    # Test updated info -- scripted
    if kind == _prior:
        assert info[_prior] == updated_info[_prior], (
            "The prior information has not been updated correctly.")
    elif kind == _likelihood:
        # Transform the likelihood info to the "external" convention and add defaults
        info_likelihood = deepcopy(info[_likelihood])
        for lik, value in list(info_likelihood.items()):
            if not hasattr(value, "get"):
                info_likelihood[lik] = {_external: value}
            info_likelihood[lik].update({
                k: v
                for k, v in class_options.items()
                if not k in info_likelihood[lik]
            })
        assert info_likelihood == updated_info[_likelihood], (
            "The likelihood information has not been updated correctly.")
    # Test updated info -- yaml
    # For now, only if ALL external pdfs are given as strings, since the YAML load fails otherwise
    if stringy == info_logpdf:
        full_output_file = os.path.join(prefix, _full_suffix + ".yaml")
        with open(full_output_file) as full:
            updated_yaml = yaml_load("".join(full.readlines()))
        for k, v in stringy.items():
            to_test = updated_yaml[kind][k]
            if kind == _likelihood:
                to_test = to_test[_external]
            assert to_test == info_logpdf[k], (
                "The updated external pdf info has not been written correctly."
            )
Code example #28
def test_parameterization_dependencies():
    class TestLike(Likelihood):
        params = {'a': None, 'b': None}

        def get_can_provide_params(self):
            return ['D']

        def logp(self, **params_values):
            a = params_values['a']
            b = params_values['b']
            params_values['_derived']['D'] = -7
            return a + 100 * b

    info_yaml = r"""
    params:
      aa:  
        prior: [2,4]
      bb:
        prior: [0,1]
        ref: [0.5, 0.1]
      c:
        value: "lambda aa, bb: aa+bb"  
      a: 
        value: "lambda c, aa: c*aa"  
      b: 1
      D:
      E:
       derived: "lambda D,c,a,aa: D*c/a+aa"      
    prior:
      pr: "lambda bb, a: bb-10*a"

    stop_at_error: True
    """
    test_info = yaml_load(info_yaml)
    test_info["likelihood"] = {"Like": TestLike}

    model = get_model(test_info)
    assert np.isclose(model.loglike({'bb': 0.5, 'aa': 2})[0], 105)
    assert np.isclose(
        model.logposterior({
            'bb': 0.5,
            'aa': 2
        }).logpriors[1], -49.5)
    test_info['params']['b'] = {'value': 'lambda a, c, bb: a*c*bb'}
    like, derived = get_model(test_info).loglike({'bb': 0.5, 'aa': 2})
    assert np.isclose(like, 630)
    assert derived == [2.5, 5.0, 6.25, -7, -1.5]
    assert np.isclose(
        model.logposterior({
            'bb': 0.5,
            'aa': 2
        }).logpriors[1], -49.5)
    test_info['params']['aa'] = 2
    test_info['params']['bb'] = 0.5
    like, derived = get_model(test_info).loglike()
    assert np.isclose(like, 630)
    assert derived == [2.5, 5.0, 6.25, -7, -1.5]

    test_info["prior"]["on_derived"] = "lambda f: 5*f"
    with pytest.raises(LoggedError) as e:
        get_model(test_info)
    assert "found and don't have a default value either" in str(e.value)

    # currently don't allow priors on derived parameters
    test_info["prior"]["on_derived"] = "lambda E: 5*E"
    with pytest.raises(LoggedError) as e:
        get_model(test_info)
    assert "that are output derived parameters" in str(e.value)