Example n. 1
0
 def __init__(self, info, name, timing=None):
     """
     Wrap an external (user-provided) likelihood function as a component.

     The function's signature is inspected to deduce input parameters,
     output (derived) parameters and requirements, honouring two legacy
     conventions (`_derived` and `_theory` keyword arguments) that are
     marked for deprecation below.

     :param info: info dict of the likelihood; ``info[_external]`` holds the
         external function (or a spec resolvable by ``get_external_function``).
     :param name: name under which this likelihood is registered.
     :param timing: forwarded to ``Theory.__init__``.
     """
     Theory.__init__(self, info, name=name, timing=timing, standalone=False)
     # Store the external function and assign its arguments
     self.external_function = get_external_function(info[_external],
                                                    name=name)
     # Reserved argument name through which the component instance is passed
     self._self_arg = "_self"
     argspec = getfullargspec(self.external_function)
     if info.get(_input_params, []):
         # Input parameters listed explicitly in the info dict take precedence
         setattr(self, _input_params, str_to_list(info.get(_input_params)))
     else:
         # Otherwise deduce them from the function signature, excluding
         # reserved/legacy argument names
         ignore_args = [self._self_arg]
         # MARKED FOR DEPRECATION IN v3.0
         ignore_args += ["_derived", "_theory"]
         # END OF DEPRECATION BLOCK
         setattr(self, _input_params,
                 [p for p in argspec.args if p not in ignore_args])
     # MARKED FOR DEPRECATION IN v3.0
     self._derived_through_arg = "_derived" in argspec.args
     # END OF DEPRECATION BLOCK
     if info.get(_output_params, []):
         setattr(self, _output_params,
                 str_to_list(info.get(_output_params)))
     # MARKED FOR DEPRECATION IN v3.0
     elif self._derived_through_arg:
         self.log.warning(
             "The use of a `_derived` argument to deal with derived parameters will be"
             " deprecated in a future version. From now on please list your derived "
             "parameters in a list as the value of %r in the likelihood info (see "
             "documentation) and have your function return a tuple "
             "`(logp, {derived_param_1: value_1, ...})`.", _output_params)
         # BEHAVIOUR TO BE REPLACED BY ERROR:
         # Recover derived-parameter names from the default value of the
         # `_derived` keyword argument (defaults align with the trailing args)
         derived_kw_index = argspec.args[-len(argspec.defaults):].index(
             "_derived")
         setattr(self, _output_params, argspec.defaults[derived_kw_index])
     # END OF DEPRECATION BLOCK
     else:
         setattr(self, _output_params, [])
     # Required quantities from other components
     self._uses_self_arg = self._self_arg in argspec.args
     if info.get(_requires) and not self._uses_self_arg:
         raise LoggedError(
             self.log,
             "If a likelihood has external requirements, declared under %r, "
             "it needs to accept a keyword argument %r.", _requires,
             self._self_arg)
     # MARKED FOR DEPRECATION IN v3.0
     self._uses_old_theory = "_theory" in argspec.args
     if self._uses_old_theory:
         self.log.warning(
             "The use of a `_theory` argument to deal with requirements will be"
             " deprecated in a future version. From now on please indicate your "
             "requirements as the value of field %r in the likelihood info (see "
             "documentation) and have your function take a parameter `_self`.",
             _requires)
         # BEHAVIOUR TO BE REPLACED BY ERROR:
         # Legacy path: take the requirements from the default value of the
         # `_theory` argument (note: this mutates the incoming `info` dict)
         info[_requires] = argspec.defaults[
             argspec.args[-len(argspec.defaults):].index("_theory")]
     # END OF DEPRECATION BLOCK
     self._requirements = info.get(_requires, {}) or {}
     self.log.info("Initialized external likelihood.")
Example n. 2
0
    def __init__(self, info, name, timing=None):
        """
        Wrap an external (user-provided) likelihood function as a component.

        The function's signature is inspected to determine required and
        optional arguments; the legacy `_derived`/`_theory` keyword-argument
        conventions are rejected with an error.
        """
        Theory.__init__(self, info, name=name, timing=timing, standalone=False)
        # Store the external function and assign its arguments
        self.external_function = get_external_function(info["external"],
                                                       name=name)
        self._self_arg = "_self"
        argspec = getfullargspec(self.external_function)
        self.input_params = str_to_list(self.input_params)
        ignore_args = [self._self_arg]
        # Positional arguments without a default are mandatory parameters
        n_defaults = len(argspec.defaults) if argspec.defaults else 0
        required_args = argspec.args[:len(argspec.args) - n_defaults]
        self.params = {p: None for p in required_args if p not in ignore_args}
        # MARKED FOR DEPRECATION IN v3.0
        if "_derived" in argspec.args:
            raise LoggedError(
                self.log,
                "The use of a `_derived` argument to deal with derived "
                "parameters has been deprecated. From now on please list your "
                "derived parameters in a list as the value of %r in the "
                "likelihood info (see documentation) and have your function "
                "return a tuple `(logp, {derived_param_1: value_1, ...})`.",
                "output_params")
        # END OF DEPRECATION BLOCK
        if self.output_params:
            self.output_params = str_to_list(self.output_params) or []
        # Required quantities from other components
        self._uses_self_arg = self._self_arg in argspec.args
        if info.get("requires") and not self._uses_self_arg:
            raise LoggedError(
                self.log,
                "If a likelihood has external requirements, declared under %r, "
                "it needs to accept a keyword argument %r.", "requires",
                self._self_arg)
        self._requirements = info.get("requires") or {}
        # MARKED FOR DEPRECATION IN v3.0
        if "_theory" in argspec.args:
            raise LoggedError(
                self.log,
                "The use of a `_theory` argument to deal with requirements has "
                "been deprecated. From now on please indicate your requirements"
                " as the value of field %r in the likelihood info (see "
                "documentation) and have your function take a parameter "
                "`_self`.", "requires")
        # END OF DEPRECATION BLOCK

        # Optional arguments: defaulted positional args plus keyword-only
        # args whose default value is a number or None
        defaulted = (zip(argspec.args[-n_defaults:], argspec.defaults)
                     if n_defaults else [])
        self._optional_args = [
            p for p, val in chain(defaulted,
                                  (argspec.kwonlydefaults or {}).items())
            if p not in ignore_args
            and (isinstance(val, numbers.Number) or val is None)]
        self._args = set(chain(self._optional_args, self.params))
        if argspec.varkw:
            # A **kwargs catch-all accepts any declared input parameter
            self._args.update(self.input_params)
        self.log.info("Initialized external likelihood.")
Example n. 3
0
 def _extract_params(self, set_func):
     args = {}
     params = []
     pars = getfullargspec(set_func)
     for arg, v in zip(pars.args[1:], pars.defaults[1:]):
         if arg in self.extra_args:
             args[arg] = self.extra_args.pop(arg)
         elif isinstance(v, numbers.Number) or v is None:
             params.append(arg)
     return args, params
Example n. 4
0
 def _extract_params(self, set_func):
     args = {}
     params = []
     pars = getfullargspec(set_func)
     for arg in pars.args[1:len(pars.args) - len(pars.defaults or [])]:
         params.append(arg)
     if pars.defaults:
         for arg, v in zip(pars.args[len(pars.args) - len(pars.defaults):],
                           pars.defaults):
             if arg in self.extra_args:
                 args[arg] = self.extra_args.pop(arg)
             elif (isinstance(v, numbers.Number)
                   or v is None) and 'version' not in arg:
                 params.append(arg)
     return args, params
Example n. 5
0
 def set(self, params_values_dict, state):
     """
     Translate sampled parameter values into a CAMBparams object.

     On the first call, builds and caches a base CAMBparams instance
     (``self._base_params``) including all fixed extra args, attributes and
     source windows; later calls only re-apply the varying parameters on a
     copy of that cache.

     :param params_values_dict: mapping of parameter names to values.
     :param state: current state dict; only ``state["params"]`` is read
         here, for error reporting.
     :return: a CAMBparams instance, or False if parameters were out of
         bounds/invalid and ``stop_at_error`` is False.
     """
     # Prepare parameters to be passed: this is called from the CambTransfers instance
     args = {self.translate_param(p): v for p, v in params_values_dict.items()}
     # Generate and save
     self.log.debug("Setting parameters: %r and %r",
                    dict(args), dict(self.extra_args))
     try:
         if not self._base_params:
             base_args = args.copy()
             base_args.update(self.extra_args)
             # Remove extra args that might
             # cause an error if the associated product is not requested
             if not self.extra_attrs["WantCls"]:
                 for not_needed in getfullargspec(
                         self.camb.CAMBparams.set_for_lmax).args[1:]:
                     base_args.pop(not_needed, None)
             self._reduced_extra_args = self.extra_args.copy()
             params = self.camb.set_params(**base_args)
             # pre-set the parameters that are not varying
             for non_param_func in ['set_classes', 'set_matter_power', 'set_for_lmax']:
                 for fixed_param in getfullargspec(
                         getattr(self.camb.CAMBparams, non_param_func)).args[1:]:
                     if fixed_param in args:
                         raise LoggedError(self.log,
                                           "Trying to sample fixed theory parameter %s",
                                           fixed_param)
                     # Already baked into _base_params; no need to re-apply later
                     self._reduced_extra_args.pop(fixed_param, None)
             if self.extra_attrs:
                 self.log.debug("Setting attributes of CAMBparams: %r",
                                self.extra_attrs)
             for attr, value in self.extra_attrs.items():
                 if hasattr(params, attr):
                     setattr(params, attr, value)
                 else:
                     raise LoggedError(
                         self.log,
                         "Some of the attributes to be set manually were not "
                         "recognized: %s=%s", attr, value)
             # Sources
             if getattr(self, "sources", None):
                 self.log.debug("Setting sources: %r", self.sources)
                 sources = self.camb.sources
                 source_windows = []
                 for source, window in self.sources.items():
                     function = window.pop("function", None)
                     if function == "spline":
                         source_windows.append(sources.SplinedSourceWindow(**window))
                     elif function == "gaussian":
                         source_windows.append(sources.GaussianSourceWindow(**window))
                     else:
                         raise LoggedError(self.log,
                                           "Unknown source window function type %r",
                                           function)
                     # Restore the popped key so self.sources stays intact
                     window["function"] = function
                 params.SourceWindows = source_windows
                 params.SourceTerms.limber_windows = self.limber
             self._base_params = params
         else:
             # Cached base exists: only apply the still-varying extra args
             args.update(self._reduced_extra_args)
         return self.camb.set_params(self._base_params.copy(), **args)
     except self.camb.baseconfig.CAMBParamRangeError:
         if self.stop_at_error:
             raise LoggedError(self.log, "Out of bound parameters: %r",
                               params_values_dict)
         else:
             self.log.debug("Out of bounds parameters. "
                            "Assigning 0 likelihood and going on.")
     except (self.camb.baseconfig.CAMBValueError, self.camb.baseconfig.CAMBError):
         if self.stop_at_error:
             self.log.error(
                 "Error setting parameters (see traceback below)! "
                 "Parameters sent to CAMB: %r and %r.\n"
                 "To ignore this kind of error, make 'stop_at_error: False'.",
                 dict(state["params"]), dict(self.extra_args))
             raise
     except self.camb.baseconfig.CAMBUnknownArgumentError as e:
         raise LoggedError(
             self.log,
             "Some of the parameters passed to CAMB were not recognized: %s" % str(e))
     # Reached on swallowed CAMB errors when stop_at_error is False
     return False
Example n. 6
0
    def __init__(self,
                 parameterization: Parameterization,
                 info_prior: Optional[PriorsDict] = None):
        """
        Initializes the prior and reference pdf's from the input information.

        Builds one independent 1d pdf per sampled parameter (with its bounds
        and reference pdf), then loads and validates any external prior
        functions given in ``info_prior``.
        """
        self.set_logger()
        self._parameterization = parameterization
        sampled_params_info = parameterization.sampled_params_info()
        # pdf: a list of independent components
        # in principle, separable: one per parameter
        self.params = []
        self.pdf = []
        self.ref_pdf = []
        self._ref_is_pointlike = True
        self._bounds = np.zeros((len(sampled_params_info), 2))
        for i, p in enumerate(sampled_params_info):
            self.params += [p]
            prior = sampled_params_info[p].get("prior")
            self.pdf += [get_scipy_1d_pdf({p: prior})]
            # Substitute a faster logpdf implementation when one is available
            # for this scipy distribution
            fast_logpdf = fast_logpdfs.get(self.pdf[-1].dist.name)
            if fast_logpdf:
                self.pdf[-1].logpdf = MethodType(fast_logpdf, self.pdf[-1])
            # Get the reference (1d) pdf
            ref = sampled_params_info[p].get("ref")
            # Cases: number, pdf (something, but not a number), nothing
            # A two-number sequence is shorthand for a normal (mean, std)
            if isinstance(ref, Sequence) and len(ref) == 2 and all(
                    isinstance(n, numbers.Number) for n in ref):
                ref = {"dist": "norm", "loc": ref[0], "scale": ref[1]}
            if isinstance(ref, numbers.Real):
                self.ref_pdf += [float(ref)]
            elif isinstance(ref, Mapping):
                self.ref_pdf += [get_scipy_1d_pdf({p: ref})]
                self._ref_is_pointlike = False
            elif ref is None:
                # No reference given: mark with NaN (fall back to the prior)
                self.ref_pdf += [np.nan]
                self._ref_is_pointlike = False
            else:
                raise LoggedError(
                    self.log,
                    "'ref' for starting position should be None or a number"
                    ", a list of two numbers for normal mean and deviation,"
                    "or a dict with parameters for a scipy distribution.")

            self._bounds[i] = [-np.inf, np.inf]
            try:
                self._bounds[i] = self.pdf[-1].interval(1)
            except AttributeError:
                raise LoggedError(
                    self.log, "No bounds defined for parameter '%s' "
                    "(maybe not a scipy 1d pdf).", p)
        # Cache indices of uniform vs non-uniform pdfs: uniform contributions
        # are precomputed below, the rest evaluated per point
        self._uniform_indices = np.array([
            i for i, pdf in enumerate(self.pdf) if pdf.dist.name == 'uniform'
        ],
                                         dtype=int)
        self._non_uniform_indices = np.array([
            i for i in range(len(self.pdf)) if i not in self._uniform_indices
        ],
                                             dtype=int)
        self._non_uniform_logpdf = [
            self.pdf[i].logpdf for i in self._non_uniform_indices
        ]
        self._upper_limits = self._bounds[:, 1].copy()
        self._lower_limits = self._bounds[:, 0].copy()
        # Constant logp contribution of all uniform components
        self._uniform_logp = -np.sum(
            np.log(self._upper_limits[self._uniform_indices] -
                   self._lower_limits[self._uniform_indices]))

        # Process the external prior(s):
        self.external = {}
        self.external_dependence = set()
        info_prior = info_prior or {}
        for name in info_prior:
            if name == prior_1d_name:
                raise LoggedError(
                    self.log, "The name '%s' is a reserved prior name. "
                    "Please use a different one.", prior_1d_name)
            self.log.debug("Loading external prior '%s' from: '%s'", name,
                           info_prior[name])
            logp = get_external_function(info_prior[name], name=name)

            argspec = getfullargspec(logp)
            known = set(parameterization.input_params())
            params = [p for p in argspec.args if p in known]
            # Arguments without a default must be known input parameters
            params_without_default = set(
                argspec.args[:(len(argspec.args) -
                               len(argspec.defaults or []))])
            unknown = params_without_default - known
            if unknown:
                if unknown.intersection(parameterization.derived_params()):
                    err = (
                        "External prior '%s' has arguments %s that are output derived "
                        "parameters, Priors must be functions of input parameters. "
                        "Use a separate 'likelihood' for the prior if needed.")
                else:
                    err = (
                        "Some of the arguments of the external prior '%s' cannot be "
                        "found and don't have a default value either: %s")
                raise LoggedError(self.log, err, name, list(unknown))
            self.external_dependence.update(params)
            self.external[name] = ExternalPrior(logp=logp, params=params)
            self.mpi_warning(
                "External prior '%s' loaded. "
                "Mind that it might not be normalized!", name)

        parameterization.check_dropped(self.external_dependence)
Example n. 7
0
def body_of_test(info_logpdf, kind, tmpdir, derived=False, manual=False):
    """
    Run a short MCMC over a 2d (x, y) model with the given external pdf's
    (as priors or likelihoods, per ``kind``) and check that their values,
    derived parameters, and the updated info are reproduced correctly.

    :param info_logpdf: dict of external pdf's (callables or strings).
    :param kind: whether to inject them as priors or as likelihoods.
    :param tmpdir: temporary directory (pytest fixture or plain path).
    :param derived: also test the derived parameters "r" and "theta".
    :param manual: run many more samples (manual inspection mode).
    """
    # For pytest's handling of tmp dirs
    if hasattr(tmpdir, "dirpath"):
        tmpdir = tmpdir.dirname
    # Random output prefix, so repeated runs do not clash
    prefix = os.path.join(tmpdir, "%d" % round(1e8 * random())) + os.sep
    if os.path.exists(prefix):
        shutil.rmtree(prefix)
    # build updated info
    info = {
        _output_prefix: prefix,
        _params: {
            "x": {
                _prior: {
                    "min": 0,
                    "max": 1
                },
                "proposal": 0.05
            },
            "y": {
                _prior: {
                    "min": -1,
                    "max": 1
                },
                "proposal": 0.05
            }
        },
        kinds.sampler: {
            "mcmc": {
                "max_samples": (10 if not manual else 5000),
                "learn_proposal": False
            }
        }
    }
    if derived:
        info[_params].update({
            "r": {
                "min": 0,
                "max": 1
            },
            "theta": {
                "min": -0.5,
                "max": 0.5
            }
        })
    # Complete according to kind
    if kind == _prior:
        info.update({_prior: info_logpdf, kinds.likelihood: {"one": None}})
    elif kind == kinds.likelihood:
        info.update({kinds.likelihood: info_logpdf})
    else:
        raise ValueError("Kind of test not known.")
    # If there is an ext function that is not a string, don't write output!
    stringy = {k: v for k, v in info_logpdf.items() if isinstance(v, str)}
    if stringy != info_logpdf:
        info.pop(_output_prefix)
    # Run
    updated_info, sampler = run(info)
    products = sampler.products()
    # Test values
    # log-prior of the uniform 2d box in (x, y)
    logprior_base = -np.log(
        (info[_params]["x"][_prior]["max"] - info[_params]["x"][_prior]["min"])
        * (info[_params]["y"][_prior]["max"] -
           info[_params]["y"][_prior]["min"]))
    # Evaluate the reference pdf's on the sampled points, passing only the
    # arguments each function declares
    logps = {
        name: logpdf(
            **{
                arg: products["sample"][arg].values
                for arg in getfullargspec(logpdf)[0]
            })
        for name, logpdf in {
            "half_ring": half_ring_func,
            "gaussian_y": gaussian_func
        }.items()
    }
    # Test #1: values of logpdf's
    if kind == _prior:
        columns_priors = [
            c for c in products["sample"].data.columns
            if c.startswith("minuslogprior")
        ]
        # Column 0 is the total prior; the rest are the individual ones
        assert np.allclose(
            products["sample"][columns_priors[0]].values,
            np.sum(products["sample"][columns_priors[1:]].values, axis=-1)), (
                "The single prior values do not add up to the total one.")
        assert np.allclose(
            logprior_base + sum(logps[p] for p in info_logpdf),
            -products["sample"]["minuslogprior"].values), (
                "The value of the total prior is not reproduced correctly.")
    elif kind == kinds.likelihood:
        for lik in info[kinds.likelihood]:
            assert np.allclose(
                -2 * logps[lik], products["sample"][_get_chi2_name(lik)].values
            ), ("The value of the likelihood '%s' is not reproduced correctly."
                % lik)
    assert np.allclose(
        logprior_base + sum(logps[p] for p in info_logpdf),
        -products["sample"]["minuslogpost"].values), (
            "The value of the posterior is not reproduced correctly.")
    # Test derived parameters, if present -- for now just for "r"
    if derived:
        derived_values = {
            param:
            func(**{arg: products["sample"][arg].values
                    for arg in ["x", "y"]})
            for param, func in derived_funcs.items()
        }
        assert all(
            np.allclose(v, products["sample"][p].values)
            for p, v in derived_values.items()
        ), ("The value of the derived parameters is not reproduced correctly.")
    # Test updated info -- scripted
    if kind == _prior:
        assert info[_prior] == updated_info[_prior], (
            "The prior information has not been updated correctly.")
    elif kind == kinds.likelihood:
        # Transform the likelihood info to the "external" convention and add defaults
        info_likelihood = deepcopy(info[kinds.likelihood])
        for lik, value in list(info_likelihood.items()):
            if not hasattr(value, "get"):
                info_likelihood[lik] = {_external: value}
            info_likelihood[lik].update({
                k: v
                for k, v in Likelihood.get_defaults().items()
                if k not in info_likelihood[lik]
            })
            # input/output params are not compared here
            for k in [_input_params, _output_params]:
                info_likelihood[lik].pop(k, None)
                updated_info[kinds.likelihood][lik].pop(k)
        assert info_likelihood == updated_info[kinds.likelihood], (
            "The likelihood information has not been updated correctly\n %r vs %r"
            % (info_likelihood, updated_info[kinds.likelihood]))
    # Test updated info -- yaml
    # For now, only if ALL external pdfs are given as strings,
    # since the YAML load fails otherwise
    if stringy == info_logpdf:
        updated_output_file = os.path.join(prefix, _updated_suffix + ".yaml")
        with open(updated_output_file) as updated:
            updated_yaml = yaml_load("".join(updated.readlines()))
        for k, v in stringy.items():
            to_test = updated_yaml[kind][k]
            if kind == kinds.likelihood:
                to_test = to_test[_external]
            assert to_test == info_logpdf[k], (
                "The updated external pdf info has not been written correctly."
            )
Example n. 8
0
    def __init__(self, parameterization, info_prior=None):
        """
        Initializes the prior and reference pdf's from the input information.

        Builds one independent 1d pdf per sampled parameter (with its bounds
        and reference pdf), then loads and validates any external prior
        functions given in ``info_prior``.
        """
        self.set_logger()
        constant_params_info = parameterization.constant_params()
        sampled_params_info = parameterization.sampled_params_info()
        if not sampled_params_info:
            self.mpi_warning("No sampled parameters requested! "
                             "This will fail for non-mock samplers.")
        # pdf: a list of independent components
        # in principle, separable: one per parameter
        self.params = []
        self.pdf = []
        self.ref_pdf = []
        self._ref_is_pointlike = True
        self._bounds = np.zeros((len(sampled_params_info), 2))
        for i, p in enumerate(sampled_params_info):
            self.params += [p]
            prior = sampled_params_info[p].get(_prior)
            self.pdf += [get_scipy_1d_pdf({p: prior})]
            # Substitute a faster logpdf implementation when one is available
            # for this scipy distribution
            fast_logpdf = fast_logpdfs.get(self.pdf[-1].dist.name)
            if fast_logpdf:
                self.pdf[-1].logpdf = MethodType(fast_logpdf, self.pdf[-1])
            # Get the reference (1d) pdf
            ref = sampled_params_info[p].get(partag.ref)
            # Cases: number, pdf (something, but not a number), nothing
            if isinstance(ref, numbers.Real):
                self.ref_pdf += [float(ref)]
            elif ref is not None:
                self.ref_pdf += [get_scipy_1d_pdf({p: ref})]
                self._ref_is_pointlike = False
            else:
                # No reference given: mark with NaN (fall back to the prior)
                self.ref_pdf += [np.nan]
                self._ref_is_pointlike = False
            self._bounds[i] = [-np.inf, np.inf]
            try:
                self._bounds[i] = self.pdf[-1].interval(1)
            except AttributeError:
                raise LoggedError(
                    self.log, "No bounds defined for parameter '%s' "
                    "(maybe not a scipy 1d pdf).", p)
        # Cache indices of uniform vs non-uniform pdfs: uniform contributions
        # are precomputed below, the rest evaluated per point
        self._uniform_indices = np.array([
            i for i, pdf in enumerate(self.pdf) if pdf.dist.name == 'uniform'
        ],
                                         dtype=int)
        self._non_uniform_indices = np.array([
            i for i in range(len(self.pdf)) if i not in self._uniform_indices
        ],
                                             dtype=int)
        self._non_uniform_logpdf = [
            self.pdf[i].logpdf for i in self._non_uniform_indices
        ]
        self._upper_limits = self._bounds[:, 1].copy()
        self._lower_limits = self._bounds[:, 0].copy()
        # Constant logp contribution of all uniform components
        self._uniform_logp = -np.sum(
            np.log(self._upper_limits[self._uniform_indices] -
                   self._lower_limits[self._uniform_indices]))

        # Process the external prior(s):
        self.external = {}
        for name in (info_prior if info_prior else {}):
            if name == _prior_1d_name:
                raise LoggedError(
                    self.log, "The name '%s' is a reserved prior name. "
                    "Please use a different one.", _prior_1d_name)
            self.log.debug("Loading external prior '%s' from: '%s'", name,
                           info_prior[name])
            opts = {"logp": get_external_function(info_prior[name], name=name)}
            self.external[name] = opts
            opts["argspec"] = (getfullargspec(opts["logp"]))
            # Map each known sampled argument to its index in the sampled set
            opts["params"] = {
                p: list(sampled_params_info).index(p)
                for p in opts["argspec"].args if p in sampled_params_info
            }
            opts["constant_params"] = {
                p: constant_params_info[p]
                for p in opts["argspec"].args if p in constant_params_info
            }
            if (not (len(opts["params"]) + len(opts["constant_params"]))):
                raise LoggedError(
                    self.log,
                    "None of the arguments of the external prior '%s' "
                    "are known *fixed* or *sampled* parameters. "
                    "This prior recognizes: %r", name, opts["argspec"].args)
            # Arguments without a default must be known parameters
            params_without_default = opts["argspec"].args[:(
                len(opts["argspec"].args) -
                len(opts["argspec"].defaults or []))]
            if not all((p in opts["params"] or p in opts["constant_params"])
                       for p in params_without_default):
                raise LoggedError(
                    self.log,
                    "Some of the arguments of the external prior '%s' cannot "
                    "be found and don't have a default value either: %s", name,
                    list(
                        set(params_without_default).difference(
                            opts["params"]).difference(
                                opts["constant_params"])))
            self.mpi_warning(
                "External prior '%s' loaded. "
                "Mind that it might not be normalized!", name)
Example n. 9
0
    def __init__(self, info_params, allow_renames=True, ignore_unused_sampled=False):
        """
        Classify the input parameter info into constant, sampled, derived and
        dynamically-defined (function) parameters, and validate the result.

        :param info_params: mapping of parameter name to its info dict.
        :param allow_renames: whether parameter renames are honoured.
        :param ignore_unused_sampled: do not fail on sampled params that are
            neither passed to a component nor used by a parameter function.
        :raises LoggedError: on invalid parameter names, unused sampled
            parameters, or input functions depending on derived parameters.
        """
        self.set_logger(lowercase=True)
        self.allow_renames = allow_renames
        # First, we load the parameters,
        # not caring about whether they are understood by any likelihood.
        # `input` contains the parameters (expected to be) understood by the likelihood,
        #   with its fixed value, its fixing function, or None if their value is given
        #   directly by the sampler.
        self._infos = {}
        self._input = {}
        self._input_funcs = {}
        self._input_args = {}
        self._output = {}
        self._constant = {}
        self._sampled = {}
        self._sampled_renames = {}
        self._derived = {}
        self._derived_funcs = {}
        self._derived_args = {}
        # Notice here that expand_info_param *always* adds a partag.derived:True tag
        # to infos without _prior or partag.value, and a partag.value field
        # to fixed params
        for p, info in info_params.items():
            self._infos[p] = deepcopy_where_possible(info)
            if is_fixed_param(info):
                if isinstance(info[partag.value], Number):
                    self._constant[p] = info[partag.value]
                    if not info.get(partag.drop, False):
                        self._input[p] = self._constant[p]
                else:
                    # Fixed via a function of other parameters
                    self._input[p] = None
                    self._input_funcs[p] = get_external_function(info[partag.value])
                    self._input_args[p] = getfullargspec(self._input_funcs[p]).args
            if is_sampled_param(info):
                self._sampled[p] = None
                if not info.get(partag.drop, False):
                    self._input[p] = None
                self._sampled_renames[p] = (
                    (lambda x: [x] if isinstance(x, str) else x)
                    (info.get(partag.renames, [])))
            if is_derived_param(info):
                self._derived[p] = deepcopy_where_possible(info)
                # Dynamical parameters whose value we want to save
                if info[partag.derived] is True and is_fixed_param(info):
                    info[partag.derived] = "lambda %s: %s" % (p, p)
                if info[partag.derived] is True:
                    self._output[p] = None
                else:
                    self._derived_funcs[p] = get_external_function(info[partag.derived])
                    self._derived_args[p] = getfullargspec(self._derived_funcs[p]).args
        # Check that the sampled and derived params are all valid python variable names
        for p in chain(self._sampled, self._derived):
            if not is_valid_variable_name(p):
                is_in = p in self._sampled
                eg_in = "  p_prime:\n    prior: ...\n  %s: 'lambda p_prime: p_prime'\n" % p
                eg_out = "  p_prime: 'lambda %s: %s'\n" % (p, p)
                raise LoggedError(
                    self.log, "Parameter name '%s' is not a valid Python variable name "
                              "(it needs to start with a letter or '_').\n"
                              "If this is an %s parameter of a likelihood or theory, "
                              "whose name you cannot change,%s define an associated "
                              "%s one with a valid name 'p_prime' as: \n\n%s",
                    p, "input" if is_in else "output",
                    "" if is_in else " remove it and",
                    "sampled" if is_in else "derived",
                    eg_in if is_in else eg_out)
        # Assume that the *un*known function arguments are likelihood/theory
        # output parameters
        for arg in (set(chain(*self._input_args.values()))
                            .union(chain(*self._derived_args.values()))
                    - set(self._constant) - set(self._input)
                    - set(self._sampled) - set(self._derived)):
            self._output[arg] = None

        # Useful sets: directly-sampled input parameters and directly "output-ed" derived
        self._directly_sampled = [p for p in self._input if p in self._sampled]
        self._directly_output = [p for p in self._derived if p in self._output]
        # Useful mapping: input params that vary if each sample is varied
        self._sampled_input_dependence = {s: [i for i in self._input
                                              if s in self._input_args.get(i, {})]
                                          for s in self._sampled}
        # From here on, some error control.
        dropped_but_never_used = (
            set(p for p, v in self._sampled_input_dependence.items() if not v)
                .difference(set(self._directly_sampled)))
        if dropped_but_never_used and not ignore_unused_sampled:
            raise LoggedError(
                self.log,
                "Parameters %r are sampled but not passed to a likelihood or theory "
                "code, and never used as arguments for any parameter functions. "
                "Check that you are not using the '%s' tag unintentionally.",
                list(dropped_but_never_used), partag.drop)
        # input params depend on input and sampled only, never on output/derived
        all_input_arguments = set(chain(*self._input_args.values()))
        bad_input_dependencies = all_input_arguments.difference(
            set(self.input_params()).union(set(self.sampled_params())).union(
                set(self.constant_params())))
        if bad_input_dependencies:
            raise LoggedError(
                self.log,
                "Input parameters defined as functions can only depend on other "
                "input parameters that are not defined as functions. "
                "In particular, an input parameter cannot depend on %r."
                "Use an explicit Theory calculator for more complex dependencies.",
                list(bad_input_dependencies))
        self._wrapped_input_funcs, self._wrapped_derived_funcs = \
            self._get_wrapped_functions_evaluation_order()
        # warn if repeated labels
        labels_inv_repeated = invert_dict(self.labels())
        for k in list(labels_inv_repeated):
            if len(labels_inv_repeated[k]) == 1:
                labels_inv_repeated.pop(k)
        if labels_inv_repeated:
            # Use Logger.warning: Logger.warn is a deprecated alias
            self.log.warning("There are repeated parameter labels: %r",
                             labels_inv_repeated)
# Example 10
    def __init__(self,
                 parameterization: Parameterization,
                 info_prior: Optional[PriorsDict] = None):
        """
        Initializes the prior and reference pdf's from the input information.
        """
        self.set_logger()
        self._parameterization = parameterization
        sampled_info = parameterization.sampled_params_info()
        # The prior is separable: one independent 1d pdf per sampled parameter
        self.params = []
        self.pdf = []
        self._bounds = np.zeros((len(sampled_info), 2))
        for idx, (param, param_info) in enumerate(sampled_info.items()):
            self.params.append(param)
            self.pdf.append(get_scipy_1d_pdf({param: param_info.get("prior")}))
            this_pdf = self.pdf[-1]
            # Swap in a faster logpdf implementation when one is registered
            # for this scipy distribution
            fast_logpdf = fast_logpdfs.get(this_pdf.dist.name)
            if fast_logpdf:
                this_pdf.logpdf = MethodType(fast_logpdf, this_pdf)
            # Default to unbounded; overwritten below if the pdf defines bounds
            self._bounds[idx] = [-np.inf, np.inf]
            try:
                self._bounds[idx] = this_pdf.interval(1)
            except AttributeError:
                raise LoggedError(
                    self.log, "No bounds defined for parameter '%s' "
                    "(maybe not a scipy 1d pdf).", param)
        # Cache which components are uniform: their logpdf is a constant,
        # computed once below instead of being re-evaluated per point
        self._uniform_indices = np.array(
            [idx for idx, pdf in enumerate(self.pdf)
             if pdf.dist.name == 'uniform'], dtype=int)
        self._non_uniform_indices = np.array(
            [idx for idx in range(len(self.pdf))
             if idx not in self._uniform_indices], dtype=int)
        self._non_uniform_logpdf = [self.pdf[idx].logpdf
                                    for idx in self._non_uniform_indices]
        self._upper_limits = self._bounds[:, 1].copy()
        self._lower_limits = self._bounds[:, 0].copy()
        # Constant log-prior contribution of all the uniform components
        self._uniform_logp = -np.sum(np.log(
            self._upper_limits[self._uniform_indices] -
            self._lower_limits[self._uniform_indices]))
        # Set the reference pdf's
        self.set_reference({param: info.get("ref")
                            for param, info in sampled_info.items()})
        # Process the external prior(s):
        self.external = {}
        self.external_dependence = set()
        for name, external_info in (info_prior or {}).items():
            if name == prior_1d_name:
                raise LoggedError(
                    self.log, "The name '%s' is a reserved prior name. "
                    "Please use a different one.", prior_1d_name)
            self.log.debug("Loading external prior '%s' from: '%s'", name,
                           external_info)
            logp = get_external_function(external_info, name=name)
            argspec = getfullargspec(logp)
            known = set(parameterization.input_params())
            params = [arg for arg in argspec.args if arg in known]
            # Arguments without a default must all be known input parameters
            n_without_default = len(argspec.args) - len(argspec.defaults or [])
            unknown = set(argspec.args[:n_without_default]) - known
            if unknown:
                if unknown.intersection(parameterization.derived_params()):
                    err = (
                        "External prior '%s' has arguments %s that are output derived "
                        "parameters, Priors must be functions of input parameters. "
                        "Use a separate 'likelihood' for the prior if needed.")
                else:
                    err = (
                        "Some of the arguments of the external prior '%s' cannot be "
                        "found and don't have a default value either: %s")
                raise LoggedError(self.log, err, name, list(unknown))
            self.external_dependence.update(params)
            self.external[name] = ExternalPrior(logp=logp, params=params)
            self.mpi_warning(
                "External prior '%s' loaded. "
                "Mind that it might not be normalized!", name)

        parameterization.check_dropped(self.external_dependence)
# Example 11
    def __init__(self,
                 info_params: Union[ParamsDict, ExpandedParamsDict],
                 allow_renames=True,
                 ignore_unused_sampled=False):
        """
        Classifies the parameters in ``info_params`` into constant, sampled,
        derived, input (passed to likelihood/theory codes) and output
        (expected to be returned by them), resolving dynamically-defined
        (function) parameters along the way.

        :param info_params: mapping from parameter name to its info; each
            entry is normalized via ``expand_info_param`` before processing.
        :param allow_renames: whether alternative ("renames") names of
            sampled parameters are tracked.
        :param ignore_unused_sampled: if False, the set of dropped sampled
            parameters not used by any input function is stored, so that an
            error can be raised later (after checking whether a prior uses
            them); if True, that set is left empty.
        """
        self.set_logger()
        self.allow_renames = allow_renames
        # First, we load the parameters,
        # not caring about whether they are understood by any likelihood.
        # `input` contains the parameters (expected to be) understood by the likelihood,
        #   with its fixed value, its fixing function, or None if their value is given
        #   directly by the sampler.
        self._infos = {}
        self._input: ParamValuesDict = {}
        self._input_funcs = {}
        self._input_args = {}
        self._input_dependencies: Dict[str, Set[str]] = {}
        self._dropped: Set[str] = set()
        self._output: ParamValuesDict = {}
        self._constant: ParamValuesDict = {}
        self._sampled: ParamValuesDict = {}
        self._sampled_renames: Dict[str, List[str]] = {}
        self._derived: ParamValuesDict = {}
        self._derived_inputs = []
        self._derived_funcs = {}
        self._derived_args = {}
        self._derived_dependencies: Dict[str, Set[str]] = {}
        # Notice here that expand_info_param *always* adds a "derived":True tag
        # to infos without "prior" or "value", and a "value" field
        # to fixed params
        for p, info in info_params.items():
            # Reject unknown per-parameter options early
            if isinstance(info, Mapping) and not set(info).issubset(partags):
                raise LoggedError(self.log,
                                  "Parameter '%s' has unknown options %s", p,
                                  set(info).difference(partags))
            info = expand_info_param(info)
            self._infos[p] = info
            # Fixed-value or function-valued ("dynamical") input parameter
            if is_fixed_or_function_param(info):
                if isinstance(info["value"], Real):
                    self._constant[p] = float(info["value"])
                    self._input[p] = self._constant[p]
                    if info.get("drop"):
                        self._dropped.add(p)
                else:
                    # NaN is a placeholder; the stored function and its
                    # argument names are kept to compute the value later
                    self._input[p] = np.nan
                    self._input_funcs[p] = get_external_function(info["value"])
                    self._input_args[p] = getfullargspec(
                        self._input_funcs[p]).args
            if is_sampled_param(info):
                self._sampled[p] = np.nan
                self._input[p] = np.nan
                if info.get("drop"):
                    self._dropped.add(p)
                self._sampled_renames[p] = str_to_list(
                    info.get("renames") or [])
            if is_derived_param(info):
                self._derived[p] = np.nan
                # Dynamical parameters whose value we want to save
                if info["derived"] is True and is_fixed_or_function_param(
                        info):
                    # parameters that are already known or computed by input funcs
                    self._derived_inputs.append(p)
                elif info["derived"] is True:
                    self._output[p] = np.nan
                else:
                    self._derived_funcs[p] = get_external_function(
                        info["derived"])
                    self._derived_args[p] = getfullargspec(
                        self._derived_funcs[p]).args
        # Check that the sampled and derived params are all valid python variable names
        for p in chain(self._sampled, self._derived):
            if not is_valid_variable_name(p):
                is_in = p in self._sampled
                # Example YAML snippets shown in the error message below
                eg_in = "  p_prime:\n    prior: ...\n  %s: " \
                        "'lambda p_prime: p_prime'\n" % p
                eg_out = "  p_prime: 'lambda %s: %s'\n" % (p, p)
                raise LoggedError(
                    self.log,
                    "Parameter name '%s' is not a valid Python variable name "
                    "(it needs to start with a letter or '_').\n"
                    "If this is an %s parameter of a likelihood or theory, "
                    "whose name you cannot change,%s define an associated "
                    "%s one with a valid name 'p_prime' as: \n\n%s", p,
                    "input" if is_in else "output",
                    "" if is_in else " remove it and",
                    "sampled" if is_in else "derived",
                    eg_in if is_in else eg_out)

        # input params depend on input and sampled only,
        # never on output/derived unless constant
        known_input = set(self._input)
        all_input_arguments = set(chain(*self._input_args.values()))
        bad_input_dependencies = all_input_arguments - known_input
        if bad_input_dependencies:
            raise LoggedError(
                self.log,
                "Input parameters defined as functions can only depend on other "
                "input parameters. In particular, an input parameter cannot depend on %r."
                " Use an explicit Theory calculator for more complex dependencies.\n"
                "If you intended to define a derived output parameter use derived: "
                "instead of value:", list(bad_input_dependencies))

        # Assume that the *un*known function arguments are likelihood/theory
        # output parameters
        for arg in (all_input_arguments.union(*self._derived_args.values()).
                    difference(known_input).difference(self._derived)):
            self._output[arg] = np.nan

        # Useful set: directly "output-ed" derived
        self._directly_output = [p for p in self._derived if p in self._output]

        self._wrapped_input_funcs, self._wrapped_derived_funcs = \
            self._get_wrapped_functions_evaluation_order()

        # Useful mapping: input params that vary if each sample is varied
        self._sampled_input_dependence = {
            s: [
                i for i in self._input
                if s in self._input_dependencies.get(i, {})
            ]
            for s in self._sampled
        }
        # From here on, some error control.
        # Only actually raise error after checking if used by prior.
        if not ignore_unused_sampled:
            self._dropped_not_directly_used = self._dropped.intersection(
                p for p, v in self._sampled_input_dependence.items() if not v)
        else:
            self._dropped_not_directly_used = set()

        # warn if repeated labels
        labels_inv_repeated = invert_dict(self.labels())
        # Keep only labels shared by more than one parameter
        labels_inv_repeated = {
            k: v
            for k, v in labels_inv_repeated.items() if len(v) > 1
        }
        if labels_inv_repeated:
            self.log.warning("There are repeated parameter labels: %r",
                             labels_inv_repeated)