Example #1
def get_scipy_1d_pdf(info):
    """Generates 1d priors from scipy's pdf's from input info."""
    param = list(info)[0]
    info2 = deepcopy(info[param])
    if not info2:
        raise LoggedError(log, "No specific prior info given for "
                               "sampled parameter '%s'." % param)
    # What distribution?
    try:
        dist = info2.pop(partag.dist).lower()
    # Not specified: uniform by default
    except KeyError:
        dist = "uniform"
    # Number: uniform with 0 width
    except AttributeError:
        dist = "uniform"
        info2 = {"loc": info2, "scale": 0}
    try:
        pdf_dist = getattr(import_module("scipy.stats"), dist)
    except AttributeError:
        raise LoggedError(
            log, "Error creating the prior for parameter '%s': "
                 "The distribution '%s' is unknown to 'scipy.stats'. "
                 "Check the list of allowed possibilities in the docs.", param, dist)
    # Recover loc,scale from min,max
    # For coherence with scipy.stats, defaults are min,max=0,1
    if "min" in info2 or "max" in info2:
        if "loc" in info2 or "scale" in info2:
            raise LoggedError(
                log, "You cannot use the 'loc/scale' convention and the 'min/max' "
                     "convention at the same time. Either use one or the other.")
        minmaxvalues = {"min": 0, "max": 1}
        for limit in minmaxvalues:
            try:
                value = info2.pop(limit, minmaxvalues[limit])
                minmaxvalues[limit] = float(value)
            except (TypeError, ValueError):
                raise LoggedError(
                    log, "Invalid value '%s: %r' in param '%s' (it must be a number)",
                    limit, value, param)
        info2["loc"] = minmaxvalues["min"]
        info2["scale"] = minmaxvalues["max"] - minmaxvalues["min"]

    for x in ["loc", "scale", "min", "max"]:
        if isinstance(info2.get(x), str):
            raise LoggedError(log, "%s should be a number (got '%s')", x, info2.get(x))
    # Check for improper priors
    if not np.all(np.isfinite([info2.get(x, 0) for x in ["loc", "scale", "min", "max"]])):
        raise LoggedError(log, "Improper prior for parameter '%s'.", param)
    # Generate and return the frozen distribution
    try:
        return pdf_dist(**info2)
    except TypeError as tp:
        raise LoggedError(
            log,
            "'scipy.stats' produced an error: <<%r>>. "
            "This probably means that the distribution '%s' "
            "does not recognize the parameter mentioned in the 'scipy' error above.",
            str(tp), dist)
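
For reference, a minimal sketch (scipy only, illustrative numbers) of the min/max -> loc/scale mapping performed above:

from scipy import stats

# A prior {"dist": "uniform", "min": -1, "max": 3} becomes loc=min, scale=max-min:
pdf = stats.uniform(loc=-1, scale=4)  # frozen distribution on [-1, 3]
print(pdf.interval(1))  # -> (-1.0, 3.0)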
Example #2
def resolve_packages_path(infos=None):
    # noinspection PyStatementEffect
    """
    Gets the external packages installation path given some infos.
    If the given infos contain more than one external packages path,
    raises an error.

    If there is no external packages path defined in the given infos,
    defaults to the env variable `%s`, and in its absence to that stored
    in the config file.

    If no path at all could be found, returns `None`.
    """ % packages_path_env
    if not infos:
        infos = []
    elif isinstance(infos, Mapping):
        infos = [infos]
    # MARKED FOR DEPRECATION IN v3.0
    for info in infos:
        if info.get("modules"):
            raise LoggedError(log, "The input field 'modules' has been deprecated."
                                   "Please use instead %r", packages_path_input)
    # END OF DEPRECATION BLOCK
    paths = set(os.path.realpath(p) for p in
                [info.get(packages_path_input) for info in infos] if p)
    if len(paths) == 1:
        return list(paths)[0]
    elif len(paths) > 1:
        raise LoggedError(
            log, "More than one packages installation path defined in the given infos. "
                 "Cannot resolve a unique one to use. "
                 "Maybe specify one via a command line argument '-%s [...]'?",
            packages_path_arg[0])
    path_env = os.environ.get(packages_path_env)
    # MARKED FOR DEPRECATION IN v3.0
    old_env = "COBAYA_MODULES"
    path_old_env = os.environ.get(old_env)
    if path_old_env and not path_env:
        raise LoggedError(log, "The env var %r has been deprecated in favor of %r",
                          old_env, packages_path_env)
    # END OF DEPRECATION BLOCK
    if path_env:
        return path_env
    return load_packages_path_from_config_file()
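
The dedup step above works because os.path.realpath collapses equivalent spellings of the same directory; a quick standard-library sketch with illustrative paths:

import os

candidates = ["/tmp/pkgs", "/tmp/./pkgs", "/tmp/../tmp/pkgs"]
paths = {os.path.realpath(p) for p in candidates if p}
assert len(paths) == 1  # all three spellings collapse to one real path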
Example #3
 def __init__(self, info, name, timing=None):
     Theory.__init__(self, info, name=name, timing=timing, standalone=False)
     # Store the external function and assign its arguments
     self.external_function = get_external_function(info[_external], name=name)
     self._self_arg = "_self"
     argspec = getfullargspec(self.external_function)
     if info.get(_input_params, []):
         setattr(self, _input_params, str_to_list(info.get(_input_params)))
     else:
         ignore_args = [self._self_arg]
         # MARKED FOR DEPRECATION IN v3.0
         ignore_args += ["_derived", "_theory"]
         # END OF DEPRECATION BLOCK
         setattr(self, _input_params,
                 [p for p in argspec.args if p not in ignore_args])
     # MARKED FOR DEPRECATION IN v3.0
     self._derived_through_arg = "_derived" in argspec.args
     # END OF DEPRECATION BLOCK
     if info.get(_output_params, []):
         setattr(self, _output_params, str_to_list(info.get(_output_params)))
     # MARKED FOR DEPRECATION IN v3.0
     elif self._derived_through_arg:
         self.log.warning(
             "The use of a `_derived` argument to deal with derived parameters will be"
             " deprecated in a future version. From now on please list your derived "
             "parameters in a list as the value of %r in the likelihood info (see "
             "documentation) and have your function return a tuple "
             "`(logp, {derived_param_1: value_1, ...})`.", _output_params)
         # BEHAVIOUR TO BE REPLACED BY ERROR:
         derived_kw_index = argspec.args[-len(argspec.defaults):].index("_derived")
         setattr(self, _output_params, argspec.defaults[derived_kw_index])
     # END OF DEPRECATION BLOCK
     else:
         setattr(self, _output_params, [])
     # Required quantities from other components
     self._uses_self_arg = self._self_arg in argspec.args
     if info.get(_requires) and not self._uses_self_arg:
         raise LoggedError(
             self.log, "If a likelihood has external requirements, declared under %r, "
                       "it needs to accept a keyword argument %r.", _requires,
             self._self_arg)
     # MARKED FOR DEPRECATION IN v3.0
     self._uses_old_theory = "_theory" in argspec.args
     if self._uses_old_theory:
         self.log.warning(
             "The use of a `_theory` argument to deal with requirements will be"
             " deprecated in a future version. From now on please indicate your "
             "requirements as the value of field %r in the likelihood info (see "
             "documentation) and have your function take a parameter `_self`.",
             _requires)
         # BEHAVIOUR TO BE REPLACED BY ERROR:
         info[_requires] = argspec.defaults[
             argspec.args[-len(argspec.defaults):].index("_theory")]
     # END OF DEPRECATION BLOCK
     self._requirements = info.get(_requires, {}) or {}
     self.log.info("Initialized external likelihood.")
Example #4
 def __init__(self, model, output=None,
              initial_size=enlargement_size, name=None, extension=None, file_name=None,
              resuming=False, load=False, onload_skip=0, onload_thin=1):
     super().__init__(model, name)
     self._value_dict = {p: np.nan for p in self.columns}
     # Create/load the main data frame and the tracking indices
     # Create the DataFrame structure
     if output:
         self.file_name, self.driver = output.prepare_collection(
             name=self.name, extension=extension)
         if file_name:
             self.file_name = file_name
     else:
         self.driver = "dummy"
     if resuming or load:
         if output:
             try:
                 self._out_load(skip=onload_skip, thin=onload_thin)
                 if set(self.data.columns) != set(self.columns):
                     raise LoggedError(
                         self.log,
                         "Unexpected column names!\nLoaded: %s\nShould be: %s",
                         list(self.data.columns), self.columns)
                 self._n = self.data.shape[0]
                 self._n_last_out = self._n
             except IOError:
                 if resuming:
                     self.log.info(
                         "Could not find a chain to resume. "
                         "Maybe burn-in didn't finish. Creating new chain file!")
                     resuming = False
                 elif load:
                     raise
         else:
             raise LoggedError(self.log,
                               "No continuation possible if there is no output.")
     else:
         self._out_delete()
     if not resuming and not load:
         self.reset(columns=self.columns, index=range(initial_size))
         # TODO: the following 2 lines should go into the `reset` method.
         if output:
             self._n_last_out = 0
Example #5
 def covmat(self, ignore_external=False):
     """
     Returns:
        The covariance matrix of the prior.
     """
     if not ignore_external and self.external:
         raise LoggedError(
             self.log,
             "It is not possible to get the covariance matrix from an external prior.")
     return np.diag([pdf.var() for pdf in self.pdf]).T
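
Since the 1-d priors are treated as independent, the covariance is just a diagonal matrix of marginal variances (which also makes the trailing .T a no-op); a sketch assuming scipy and numpy:

import numpy as np
from scipy import stats

pdfs = [stats.uniform(loc=0, scale=2), stats.norm(loc=0, scale=3)]
print(np.diag([pdf.var() for pdf in pdfs]))
# [[0.33333333 0.        ]
#  [0.         9.        ]]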
Example #6
    def load_dataset_file(self, filename, dataset_params):
        self.l_max = self.l_max or int(50000 * self.acc)  # lmax here is an internal parameter for transforms
        if filename.endswith(".fits"):

            if dataset_params:
                raise LoggedError(
                    self.log, "'dataset_params' can only be specified "
                              "for .dataset (not .fits) file.")
            try:
                self.load_fits_data(filename)
            except IOError:
                raise LoggedError(
                    self.log, "The data file '%s' could not be found. "
                              "Check your paths!", filename)
        else:
            super(_des_prototype, self).load_dataset_file(filename, dataset_params)
        self.initialize_postload()
Example #7
def prepare_data_script():
    warn_deprecation()
    logger_setup()
    if "CONTAINED" not in os.environ:
        raise LoggedError(log, "This command should only be run within a container. "
                               "Run 'cobaya-install' instead.")
    parser = argparse.ArgumentParser(
        prog="cobaya prepare-data",
        description="Cobaya's installation tool for the data needed by a container.")
    parser.add_argument("-f", "--force", action="store_true", default=False,
                        help="Force re-installation of apparently installed packages.")
    arguments = parser.parse_args()
    try:
        info = load_input(requirements_file_path)
    except IOError:
        raise LoggedError(log, "Cannot find the requirements file. "
                               "This should not be happening.")
    install(info, path="packages_path", force=arguments.force,
            **{code_path: False, data_path: True})
Example #8
 def _dump_slice__txt(self, n_min=None, n_max=None):
     if n_min is None or n_max is None:
         raise LoggedError(self.log, "Needs to specify the limit n's to dump.")
     if self._n_last_out == n_max:
         return
     self._n_last_out = n_max
     if not getattr(self, "_txt_formatters", False):
         n_float = 8
         # Add to this 7 places: sign, leading 0's, exp with sign and 3 figures.
         width_col = lambda col: max(7 + n_float, len(col))
         fmts = ["{:" + "{}.{}".format(width_col(col), n_float) + "g}"
                 for col in self.data.columns]
         # `fmt` as a kwarg with default value is needed to force substitution of var.
         # lambda is defined as a string to allow picklability (also header formatter)
         self._txt_formatters = {
             col: eval("lambda x, fmt=fmt: fmt.format(x)")
             for col, fmt in zip(self.data.columns, fmts)}
         self._header_formatter = [
             eval(
                 'lambda s, w=width_col(col): ("{:>" + "{}".format(w) + "s}").format(s)',
                 {'width_col': width_col, 'col': col})
             for col in self.data.columns]
     do_header = not n_min
     if do_header:
         # TODO: this should be done with file locks instead
         if os.path.exists(self.file_name):
             raise LoggedError(
                 self.log, "The output file %s already exists. You may be running "
                           "multiple jobs with the same output when you intended to "
                           "run with MPI. Check that mpi4py is correctly installed and"
                           " configured; e.g. try the test at "
                           "https://cobaya.readthedocs.io/en/latest/installation."
                           "html#mpi-parallelization-optional-but-encouraged",
                 self.file_name)
         with open(self.file_name, "a", encoding="utf-8") as out:
             out.write("#" + " ".join(
                 f(col) for f, col
                 in zip(self._header_formatter, self.data.columns))[1:] + "\n")
     with open(self.file_name, "a", encoding="utf-8") as out:
         lines = self.data[n_min:n_max].to_string(
             header=False, index=False, na_rep="nan", justify="right",
             formatters=self._txt_formatters)
         out.write(lines + "\n")
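
The eval'd formatters above boil down to fixed-width '%g'-style formats; a standalone sketch of what gets built per column:

n_float = 8
col = "minuslogpost"
width = max(7 + n_float, len(col))  # 7 extra places: sign, leading 0's, signed 3-digit exp
fmt = "{:" + "{}.{}".format(width, n_float) + "g}"
print(repr(fmt.format(-1.2345678e-100)))  # -> '-1.2345678e-100' (exactly 15 wide)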
Example #9
 def initialize_with_params(self):
     # Check that the parameters are the right ones
     differences = are_different_params_lists(
         self.input_params,
         self.expected_params_fg + self.expected_params_nuis,
         name_A="given",
         name_B="expected",
     )
     if differences:
         raise LoggedError(self.log, "Configuration error in parameters: %r.", differences)
Example #10
 def cast(x):
     try:
         if dtype == int:
             # in case ints are given in exponential notation, make int(float())
             return int(float(x))
         else:
             return float(x)
     except ValueError:
         raise LoggedError(log, "Could not convert '%r' to a number.",
                           x)
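
The double cast matters because plain int() rejects exponential notation, which is common in YAML input:

print(int(float("1e3")))  # -> 1000
# int("1e3") would raise ValueError; float() accepts the exponent first.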
Example #11
 def _to_sampled_array(self, params_values):
     """
     Internal method to interact with the prior.
     Needs correct (not renamed) parameter names.
     """
     if hasattr(params_values, "keys"):
         params_values_array = np.array(list(params_values.values()))
     else:
         params_values_array = np.atleast_1d(params_values)
         if params_values_array.shape[0] != self.prior.d():
             raise LoggedError(
                 self.log,
                 "Wrong dimensionality: it's %d and it should be %d.",
                 len(params_values_array), self.prior.d())
     if len(params_values_array.shape) >= 2:
         raise LoggedError(
             self.log,
             "Cannot take arrays of points as inputs, just single points.")
     return params_values_array
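
For context, np.atleast_1d is what lets the method accept scalars, lists and 1-d arrays uniformly while the shape check still rejects arrays of points:

import numpy as np

print(np.atleast_1d(0.5).shape)             # -> (1,)   scalar promoted
print(np.atleast_1d([0.5, 1.0]).shape)      # -> (2,)   list passes through
print(np.atleast_1d([[0.5], [1.0]]).shape)  # -> (2, 1) caught as >= 2-d above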
Example #12
 def create_folder(self, folder):
     """
     Creates the given folder (MPI-aware).
     """
     try:
         if not os.path.exists(folder):
             os.makedirs(folder)
     except Exception as e:
         raise LoggedError(
             self.log, "Could not create folder %r. Reason: %r", folder, str(e))
Example #13
 def __init__(self, model, output=None, cache_size=_default_cache_size, name=None,
              extension=None, file_name=None, resuming=False, load=False,
              onload_skip=0, onload_thin=1):
     super().__init__(model, name)
     self.cache_size = cache_size
     # Create/load the main data frame and the tracking indices
     # Create the DataFrame structure
     if output:
         self.file_name, self.driver = output.prepare_collection(
             name=self.name, extension=extension)
         if file_name:
             self.file_name = file_name
     else:
         self.driver = "dummy"
     if resuming or load:
         if output:
             try:
                 self._out_load(skip=onload_skip, thin=onload_thin)
                 if set(self.data.columns) != set(self.columns):
                     raise LoggedError(
                         self.log,
                         "Unexpected column names!\nLoaded: %s\nShould be: %s",
                         list(self.data.columns), self.columns)
                 self._n_last_out = len(self)
             except IOError:
                 if resuming:
                     self.log.info(
                         "Could not find a chain to resume. "
                         "Maybe burn-in didn't finish. Creating new chain file!")
                     resuming = False
                 elif load:
                     raise
         else:
             raise LoggedError(self.log,
                               "No continuation possible if there is no output.")
     else:
         self._out_delete()
     if not resuming and not load:
         self.reset()
     # Prepare fast numpy cache
     self._icol = {col: i for i, col in enumerate(self.columns)}
     self._cache_reset()
Example #14
 def add(self,
         values,
         derived=None,
         weight=1,
         logpost=None,
         logpriors=None,
         loglikes=None):
     self._enlarge_if_needed()
     self.data.at[self._n, _weight] = weight
     if logpost is None:
         try:
             logpost = sum(logpriors) + sum(loglikes)
         except TypeError:
             raise LoggedError(
                 self.log,
                 "If a log-posterior is not specified, you need to pass "
                 "a log-likelihood and a log-prior.")
     self.data.at[self._n, _minuslogpost] = -logpost
     if logpriors is not None:
         for name, value in zip(self.minuslogprior_names, logpriors):
             self.data.at[self._n, name] = -value
         self.data.at[self._n, _minuslogprior] = -sum(logpriors)
     if loglikes is not None:
         for name, value in zip(self.chi2_names, loglikes):
             self.data.at[self._n, name] = -2 * value
         self.data.at[self._n, _chi2] = -2 * sum(loglikes)
     if len(values) != len(self.sampled_params):
         raise LoggedError(
             self.log,
             "Got %d values for the sampled parameters. Should be %d.",
             len(values), len(self.sampled_params))
     for name, value in zip(self.sampled_params, values):
         self.data.at[self._n, name] = value
     if derived is not None:
         if len(derived) != len(self.derived_params):
             raise LoggedError(
                 self.log,
                 "Got %d values for the dervied parameters. Should be %d.",
                 len(derived), len(self.derived_params))
         for name, value in zip(self.derived_params, derived):
             self.data.at[self._n, name] = value
     self._n += 1
Example #15
    def __init__(self, info_likelihood, packages_path=None, timing=None, theory=None):
        super().__init__()
        self.set_logger("likelihood")
        self.theory = theory
        # Get the individual likelihood classes
        for name, info in info_likelihood.items():
            if isinstance(name, Theory):
                name, info = name.get_name(), info
            if isinstance(info, Theory):
                self.add_instance(name, info)
            elif _external in info:
                if isinstance(info[_external], Theory):
                    self.add_instance(name, info[_external])
                elif inspect.isclass(info[_external]):
                    if not is_LikelihoodInterface(info[_external]) or \
                            not issubclass(info[_external], Theory):
                        raise LoggedError(self.log, "%s: external class likelihood must "
                                                    "be a subclass of Theory and have "
                                                    "logp, current_logp attributes",
                                          info[_external].__name__)
                    self.add_instance(name,
                                      info[_external](info, packages_path=packages_path,
                                                      timing=timing,
                                                      standalone=False,
                                                      name=name))
                else:
                    # If it has an "external" key, wrap it up. Else, load it up
                    self.add_instance(name, LikelihoodExternalFunction(info, name,
                                                                       timing=timing))
            else:
                like_class = get_resolved_class(
                    name, kind=kinds.likelihood,
                    component_path=info.pop(_component_path, None),
                    class_name=info.get(_class_name))
                self.add_instance(name, like_class(info, packages_path=packages_path,
                                                   timing=timing, standalone=False,
                                                   name=name))

            if not is_LikelihoodInterface(self[name]):
                raise LoggedError(self.log, "'Likelihood' %s is not actually a "
                                            "likelihood (no current_logp attribute)",
                                  name)
Example #16
def check_sampler_info(info_old: Optional[SamplersDict],
                       info_new: SamplersDict,
                       is_resuming=False):
    """
    Checks compatibility between the new sampler info and that of a pre-existing run.

    Done separately from `Output.check_compatible_and_dump` because there may be
    multiple samplers mentioned in an `updated.yaml` file, e.g. `MCMC` + `Minimize`.
    """
    logger_sampler = get_logger(__name__)
    if not info_old:
        return
    # TODO: restore this at some point: just append minimize info to the old one
    # There is old info, but the new one is Minimizer and the old one is not
    # if (len(info_old) == 1 and list(info_old) != ["minimize"] and
    #      list(info_new) == ["minimize"]):
    #     # In-place append of old+new --> new
    #     aux = info_new.pop("minimize")
    #     info_new.update(info_old)
    #     info_new.update({"minimize": aux})
    #     info_old = {}
    #     keep_old = {}
    if list(info_old) != list(info_new) and list(info_new) == ["minimize"]:
        return
    if list(info_old) == list(info_new):
        # Restore some selected old values for some classes
        keep_old = get_preferred_old_values({"sampler": info_old})
        info_new = recursive_update(info_new, keep_old.get("sampler", {}))
    if not is_equal_info({"sampler": info_old}, {"sampler": info_new},
                         strict=False):
        if is_resuming:
            raise LoggedError(
                logger_sampler,
                "Old and new Sampler information not compatible! "
                "Resuming not possible!")
        else:
            raise LoggedError(
                logger_sampler,
                "Found old Sampler information which is not compatible "
                "with the new one. Delete the previous output manually, "
                "or automatically with either "
                "'-f', '--force', 'force: True'")
Example #17
    def logp(self, **param_values):
        expected_cls = self.theory.get_Cl(ell_factor=True)['tt'][:self.lmax + 1]
        binned_expected = np.dot(self.binmat, expected_cls)

        diff = binned_expected - self.binned_data_vector
        chisq = np.dot(diff, np.dot(self.inv_binned_cov, diff))
        if np.isinf(chisq) or np.isnan(chisq):
            raise LoggedError(self.log, 'Invalid chisq: %s', chisq)

        return self.loglike_norm - chisq / 2
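
The chisq computed above is the usual Gaussian quadratic form; a numpy-only sketch with illustrative numbers:

import numpy as np

diff = np.array([0.1, -0.2])                    # data - theory residuals
inv_cov = np.linalg.inv(np.diag([0.04, 0.04]))  # inverse binned covariance
chisq = diff @ inv_cov @ diff
print(chisq)  # -> 1.25 (= 0.25 + 1.0, up to floating point)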
Example #18
 def check_dropped(self, external_dependence):
     # some error control, given external_dependence from prior
     # only raise error after checking not used by prior
     if self._dropped_not_directly_used.difference(external_dependence):
         raise LoggedError(
             self.log,
             "Parameters %r are sampled but not passed to a likelihood or theory "
             "code, and never used as arguments for any prior or parameter "
             "functions. Check that you are not using "
             "the '%s' tag unintentionally.",
             list(self._dropped_not_directly_used), "drop")
Example #19
def get_kind(name: str, allow_external=True) -> Kind:
    """
    Given a hopefully unique component name, tries to determine its kind:
    ``sampler``, ``theory`` or ``likelihood``.
    """
    for i, kind in enumerate(kinds):
        cls = get_class(name,
                        kind,
                        allow_external=allow_external and i == len(kinds) - 1,
                        None_if_not_found=True)
        if cls is not None:
            break
    else:
        raise LoggedError(log, "Could not find component with name %r", name)
    for kind, tp in get_base_classes().items():
        if issubclass(cls, tp):
            return kind

    raise LoggedError(log, "Class %r is not a standard class type %r", name,
                      kinds)
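
get_kind leans on Python's for/else: the else clause runs only if the loop finishes without a break, i.e. when no kind yielded a class. A toy sketch, with the condition standing in for a successful get_class lookup:

for kind in ("sampler", "theory", "likelihood"):
    if kind == "theory":  # stand-in for get_class() finding the component
        break
else:
    raise ValueError("no kind matched")  # only reached without a break
print(kind)  # -> theory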
Example #20
    def __init__(self,
                 info_sampler: SamplerDict,
                 model: Model,
                 output: Optional[Output] = None,
                 packages_path: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Actual initialization of the class. Loads the default and input information and
        calls the custom ``initialize`` method.

        [Do not modify this one.]
        """
        self._model = model
        self._output = output
        self._updated_info = deepcopy_where_possible(info_sampler)
        super().__init__(info_sampler,
                         packages_path=packages_path,
                         name=name,
                         initialize=False,
                         standalone=False)
        if not model.parameterization.sampled_params():
            self.mpi_warning("No sampled parameters requested! "
                             "This will fail for non-mock samplers.")
        # Load checkpoint info, if resuming
        if self.output.is_resuming() and not isinstance(self, Minimizer):
            checkpoint_info = None
            if mpi.is_main_process():
                try:
                    checkpoint_info = yaml_load_file(
                        self.checkpoint_filename())

                    if self.get_name() not in checkpoint_info["sampler"]:
                        raise LoggedError(
                            self.log, "Checkpoint file found at '%s' "
                            "but it corresponds to a different sampler.",
                            self.checkpoint_filename())
                except (IOError, TypeError):
                    pass
            checkpoint_info = mpi.share_mpi(checkpoint_info)
            if checkpoint_info:
                self.set_checkpoint_info(checkpoint_info)
                self.mpi_info("Resuming from previous sample!")
        elif not isinstance(self, Minimizer) and mpi.is_main_process():
            try:
                output.delete_file_or_folder(self.checkpoint_filename())
                output.delete_file_or_folder(self.progress_filename())
            except (OSError, TypeError):
                pass
        self._set_rng()
        self.initialize()
        model.set_cache_size(self._get_requested_cache_size())
        # Add to the updated info some values which are
        # only available after initialisation
        self._updated_info["version"] = self.get_version()
Example #21
 def _get_z_dependent(self, quantity, z, pool=None):
     if pool is None:
         pool = self.collectors[quantity].z_pool
     try:
         i_kwarg_z = pool.find_indices(z)
     except ValueError:
         raise LoggedError(
             self.log, f"{quantity} not computed for all z requested. "
             f"Requested z are {z}, but computed ones are "
             f"{pool.values}.")
     return np.array(self.current_state[quantity], copy=True)[i_kwarg_z]
Example #22
 def initialize_with_params(self):
     # Check that the parameters are the right ones
     differences = are_different_params_lists(self.input_params,
                                              self.expected_params,
                                              name_A="given",
                                              name_B="expected")
     if differences:
         raise LoggedError(
             self.log, "Configuration error in parameters: %r. "
             "If this has happened without you fiddling with the defaults, "
             "please open an issue in GitHub.", differences)
Example #23
def is_installed_clik(path, log_and_fail=False, import_it=True):
    log = logging.getLogger("clik")
    clik_path = None
    try:
        clik_path = os.path.join(get_clik_source_folder(path), 'lib/python/site-packages')
    except FileNotFoundError:
        if log_and_fail:
            raise LoggedError(log, "The given folder does not exist: '%s'",
                              clik_path or path)
        return False
    sys.path.insert(0, clik_path)
    try:
        if import_it:
            import clik
        return True
    except Exception:
        print('Failed to import clik')
        if log_and_fail:
            raise LoggedError(log, "Error importing click from: '%s'", clik_path)
        return False
Example #24
def read_dnumber(n, d, dtype=float):
    """Reads number as multiples of a given dimension."""
    try:
        if isinstance(n, str):
            if n[-1].lower() == "d":
                if n.lower() == "d":
                    return d
                return dtype(n[:-1]) * d
            raise ValueError
    except ValueError:
        raise LoggedError(log, "Could not convert '%r' to a number.", n)
    return n
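
The 'd' suffix convention parsed above reads numbers as multiples of a dimension; a hedged re-implementation (parse_dnumber is an illustrative name, not cobaya API):

def parse_dnumber(n, d, dtype=float):
    # "2d" -> 2 * d, bare "d" -> d, plain numbers pass through unchanged
    if isinstance(n, str) and n.lower().endswith("d"):
        return d if n.lower() == "d" else dtype(n[:-1]) * d
    return n

print(parse_dnumber("2d", d=10))  # -> 20.0
print(parse_dnumber("d", d=10))   # -> 10
print(parse_dnumber(5, d=10))     # -> 5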
Example #25
 def get_requirements(self):
     # Requisites
     if self.use_grid_2d:
         zs = {self.observable_1: np.array([self.redshift]),
               self.observable_2: np.array([self.redshift])}
     elif self.use_grid_3d:
         zs = {self.observable_1: np.array([self.redshift]),
               self.observable_2: np.array([self.redshift]),
               self.observable_3: np.array([self.redshift])
               }
     else:
         zs = {obs: self.data.loc[self.data["observable"] == obs, "z"].values
               for obs in self.data["observable"].unique()}
     theory_reqs = {
         "DV_over_rs": {
             "angular_diameter_distance": {"z": zs.get("DV_over_rs", None)},
             "Hubble": {"z": zs.get("DV_over_rs", None)},
             "rdrag": None},
         "rs_over_DV": {
             "angular_diameter_distance": {"z": zs.get("rs_over_DV", None)},
             "Hubble": {"z": zs.get("rs_over_DV", None)},
             "rdrag": None},
         "DM_over_rs": {
             "angular_diameter_distance": {"z": zs.get("DM_over_rs", None)},
             "rdrag": None},
         "DA_over_rs": {
             "angular_diameter_distance": {"z": zs.get("DA_over_rs", None)},
             "rdrag": None},
         "DH_over_rs": {
             "Hubble": {"z": zs.get("DH_over_rs", None)},
             "rdrag": None},
         "Hz_rs": {
             "Hubble": {"z": zs.get("Hz_rs", None)},
             "rdrag": None},
         "f_sigma8": {
             "fsigma8": {"z": zs.get("f_sigma8", None)},
         },
         "F_AP": {
             "angular_diameter_distance": {"z": zs.get("F_AP", None)},
             "Hubble": {"z": zs.get("F_AP", None)}}}
     obs_used_not_implemented = np.unique([obs for obs in self.data["observable"]
                                           if obs not in theory_reqs])
     if len(obs_used_not_implemented):
         raise LoggedError(
             self.log, "This likelihood refers to observables '%s' that have not been"
                       " implemented yet. Did you mean any of %s? "
                       "If you didn't, please, open an issue in github.",
             obs_used_not_implemented, list(theory_reqs))
     requisites = {}
     if self.has_type:
         for obs in self.data["observable"].unique():
             requisites.update(theory_reqs[obs])
     return requisites
Example #26
    def check_no_repeated_input_extra(self):
        """
        Checks that there are no repeated parameters between input and extra.

        Should be called at initialisation, and at the end of every call to must_provide()
        """
        common = set(self.input_params).intersection(self.extra_args)
        if common:
            raise LoggedError(
                self.log, "The following parameters appear both as input parameters and "
                          "as extra arguments: %s. Please, remove one of the definitions "
                          "of each.", common)
Example #27
 def _cmb_unit_factor(self, units, T_cmb):
     units_factors = {"1": 1,
                      "muK2": T_cmb * 1.e6,
                      "K2": T_cmb,
                      "FIRASmuK2": 2.7255e6,
                      "FIRASK2": 2.7255
                      }
     try:
         return units_factors[units]
     except KeyError:
         raise LoggedError(self.log, "Units '%s' not recognized. Use one of %s.",
                           units, list(units_factors))
Example #28
 def check_force_resume(cls, output, info=None):
     """
     Performs the necessary checks on existing files if resuming or forcing
     (including deleting some output files when forcing).
     """
     if output.is_resuming():
         if mpi.is_main_process():
             raise LoggedError(
                 output.log, "Minimizer does not support resuming. "
                             "If you want to start over, force "
                             "('-f', '--force', 'force: True')")
     super().check_force_resume(output, info=info)
Example #29
    def _get_wrapped_functions_evaluation_order(self):
        # get evaluation order for input and derived parameter function
        # and pre-prepare argument dicts

        wrapped_funcs: Tuple[Dict[str, _WrappedFunc],
                             Dict[str, _WrappedFunc]] = ({}, {})
        known = set(chain(self._constant, self._sampled))

        for derived, wrapped_func in zip((False, True), wrapped_funcs):
            if derived:
                inputs = self._derived_funcs.copy()
                input_args = self._derived_args
                known.update(self._output)
                output = self._derived
                dependencies = self._derived_dependencies
            else:
                inputs = self._input_funcs.copy()
                input_args = self._input_args
                output = self._input
                dependencies = self._input_dependencies

            while inputs:
                for p, func in inputs.items():
                    args = input_args[p]
                    if not known.issuperset(args):
                        continue
                    known.add(p)
                    dependencies[p] = set(
                        chain(args,
                              *(dependencies.get(arg, []) for arg in args)))

                    if set(args).issubset(self._constant):
                        # all inputs are constant, so output is constant and precomputed
                        self._constant[p] = \
                            self._call_param_func(p, func,
                                                  {arg: self._constant[arg] for arg in
                                                   args})
                        output[p] = self._constant[p]
                    else:
                        # Store function, argument dict with constants pre-filled,
                        # and unset args as tuple
                        wrapped_func[p] = \
                            (func, {arg: self._constant.get(arg) for arg in args},
                             [arg for arg in args if arg not in self._constant])
                    del inputs[p]
                    break
                else:
                    raise LoggedError(
                        self.log,
                        "Could not resolve arguments for parameters %s. "
                        "Maybe there is a circular dependency between derived "
                        "parameters?", list(inputs))
        return wrapped_funcs
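
The while/for/else above is a plain topological resolution: keep resolving any function whose arguments are all known, and flag a cycle if a full pass resolves nothing. A self-contained sketch with toy data:

input_args = {"a": ["x"], "b": ["a"], "c": ["a", "b"]}  # param -> its arguments
known = {"x"}  # constants and sampled parameters
order, pending = [], dict(input_args)
while pending:
    for p, args in pending.items():
        if known.issuperset(args):
            known.add(p)
            order.append(p)
            del pending[p]
            break  # restart the scan after each resolution
    else:
        raise ValueError("circular dependency among %s" % list(pending))
print(order)  # -> ['a', 'b', 'c']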
Example #30
 def _get_nz(self, cosmo, name, **pars):
     # Get an N(z) for tracer with name `name`
     z = self.bin_properties[name]['z_fid']
     nz = self.bin_properties[name]['nz_fid']
     if self.nz_model == 'NzShift':
         z = z + pars[self.input_params_prefix + '_' + name + '_dz']
         msk = z >= 0
         z = z[msk]
         nz = nz[msk]
     elif self.nz_model != 'NzNone':
         raise LoggedError(self.log, "Unknown Nz model %s" % self.nz_model)
     return (z, nz)
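
The NzShift branch rigidly displaces the fiducial N(z) by dz and drops whatever lands below z = 0; a toy numpy sketch:

import numpy as np

z = np.linspace(0.0, 2.0, 5)
nz = np.exp(-(z - 1.0) ** 2)  # toy fiducial N(z)
z = z - 0.6                   # a shift pulling part of the grid below z = 0
msk = z >= 0
z, nz = z[msk], nz[msk]
print(z)  # -> [0.4 0.9 1.4]  (the two negative-z points were cut)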