def __init__(self, info, name, timing=None):
    Theory.__init__(self, info, name=name, timing=timing, standalone=False)
    # Store the external function and assign its arguments
    self.external_function = get_external_function(info[_external], name=name)
    self._self_arg = "_self"
    argspec = getfullargspec(self.external_function)
    if info.get(_input_params, []):
        setattr(self, _input_params, str_to_list(info.get(_input_params)))
    else:
        ignore_args = [self._self_arg]
        # MARKED FOR DEPRECATION IN v3.0
        ignore_args += ["_derived", "_theory"]
        # END OF DEPRECATION BLOCK
        setattr(self, _input_params,
                [p for p in argspec.args if p not in ignore_args])
    # MARKED FOR DEPRECATION IN v3.0
    self._derived_through_arg = "_derived" in argspec.args
    # END OF DEPRECATION BLOCK
    if info.get(_output_params, []):
        setattr(self, _output_params, str_to_list(info.get(_output_params)))
    # MARKED FOR DEPRECATION IN v3.0
    elif self._derived_through_arg:
        self.log.warning(
            "The use of a `_derived` argument to deal with derived parameters will be"
            " deprecated in a future version. From now on please list your derived "
            "parameters in a list as the value of %r in the likelihood info (see "
            "documentation) and have your function return a tuple "
            "`(logp, {derived_param_1: value_1, ...})`.", _output_params)
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        derived_kw_index = argspec.args[-len(argspec.defaults):].index("_derived")
        setattr(self, _output_params, argspec.defaults[derived_kw_index])
    # END OF DEPRECATION BLOCK
    else:
        setattr(self, _output_params, [])
    # Required quantities from other components
    self._uses_self_arg = self._self_arg in argspec.args
    if info.get(_requires) and not self._uses_self_arg:
        raise LoggedError(
            self.log, "If a likelihood has external requirements, declared under %r, "
                      "it needs to accept a keyword argument %r.",
            _requires, self._self_arg)
    # MARKED FOR DEPRECATION IN v3.0
    self._uses_old_theory = "_theory" in argspec.args
    if self._uses_old_theory:
        self.log.warning(
            "The use of a `_theory` argument to deal with requirements will be"
            " deprecated in a future version. From now on please indicate your "
            "requirements as the value of field %r in the likelihood info (see "
            "documentation) and have your function take a parameter `_self`.",
            _requires)
        # BEHAVIOUR TO BE REPLACED BY ERROR:
        info[_requires] = argspec.defaults[
            argspec.args[-len(argspec.defaults):].index("_theory")]
    # END OF DEPRECATION BLOCK
    self._requirements = info.get(_requires, {}) or {}
    self.log.info("Initialized external likelihood.")
def get_best_covmat_ext(packages_path, params_info, likelihoods_info,
                        random_state, cached=True) -> Optional[dict]:
    """
    Actual covmat finder used by `get_best_covmat`. Call directly for more control
    on the parameters used.

    Returns the same dict as `get_best_covmat`, except for the covariance matrix
    itself.
    """
    global _loaded_covmats_database
    covmats_database = (
        _loaded_covmats_database or get_covmat_database(packages_path, cached=cached))
    _loaded_covmats_database = covmats_database
    # Prepare params and likes aliases
    params_renames = set(chain(*[
        [p] + str_to_list(info.get("renames", []))
        for p, info in params_info.items()]))
    likes_renames = set(chain(*[
        [like] + str_to_list((info or {}).get("aliases", []))
        for like, info in likelihoods_info.items()]))
    delimiters = r"[_\.]"
    likes_regexps = [re.compile(delimiters + re.escape(_like) + delimiters)
                     for _like in likes_renames]
    # Match number of params
    score_params = (
        lambda covmat: len(set(covmat["params"]).intersection(params_renames)))
    best_p = get_best_score(covmats_database, score_params)
    if not best_p:
        log.warning(
            "No covariance matrix found including at least one of the given parameters")
        return None
    # Match likelihood names / keywords
    # No debug print here: way too many!
    score_likes = (
        lambda covmat: len([0 for r in likes_regexps if r.search(covmat["name"])]))
    best_p_l = get_best_score(best_p, score_likes)
    if is_debug(log):
        log.debug("Subset based on params + likes:\n - " +
                  "\n - ".join([b["name"] for b in best_p_l]))
    # Finally, in case there is more than one, select the fewest params
    # and the shortest name (simpler!)
    # #params first, to avoid extended models with shorter covmat name
    def score_simpler_params(covmat):
        return -len(covmat["params"])
    best_p_l_sp = get_best_score(best_p_l, score_simpler_params)
    if is_debug(log):
        log.debug("Subset based on params + likes + fewest params:\n - " +
                  "\n - ".join([b["name"] for b in best_p_l_sp]))
    score_simpler_name = (
        lambda covmat: -len(covmat["name"].replace("_", " ").replace("-", " ").split()))
    best_p_l_sp_sn = get_best_score(best_p_l_sp, score_simpler_name)
    if is_debug(log):
        log.debug("Subset based on params + likes + fewest params + shortest name:"
                  "\n - " + "\n - ".join([b["name"] for b in best_p_l_sp_sn]))
    # If there is more than one (unlikely), just pick one at random
    if len(best_p_l_sp_sn) > 1:
        log.warning("WARNING: >1 possible best covmats: %r",
                    [b["name"] for b in best_p_l_sp_sn])
    random_state = np.random.default_rng(random_state)
    return best_p_l_sp_sn[random_state.choice(range(len(best_p_l_sp_sn)))].copy()
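# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of calling `get_best_covmat_ext` directly, assuming a
# populated covmat database under `packages_path`. The parameter renames and
# likelihood aliases below are hypothetical placeholders.
def _example_best_covmat_lookup(packages_path):
    params_info = {"omegabh2": {"renames": ["ombh2"]},
                   "omegach2": {"renames": ["omch2"]}}
    likelihoods_info = {"planck_2018_lowl.TT": {"aliases": ["lowl"]}}
    best = get_best_covmat_ext(packages_path, params_info, likelihoods_info,
                               random_state=1234)
    if best is None:
        return None
    # `best` carries metadata (e.g. "name" and "params"); the covariance
    # matrix itself is loaded separately by `get_best_covmat`.
    return best["name"], best["params"]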
def __init__(self, info, name, timing=None):
    Theory.__init__(self, info, name=name, timing=timing, standalone=False)
    # Store the external function and assign its arguments
    self.external_function = get_external_function(info["external"], name=name)
    self._self_arg = "_self"
    argspec = getfullargspec(self.external_function)
    self.input_params = str_to_list(self.input_params)
    ignore_args = [self._self_arg]
    if argspec.defaults:
        required_args = argspec.args[:-len(argspec.defaults)]
    else:
        required_args = argspec.args
    self.params = {p: None for p in required_args if p not in ignore_args}
    # MARKED FOR DEPRECATION IN v3.0
    if "_derived" in argspec.args:
        raise LoggedError(
            self.log, "The use of a `_derived` argument to deal with derived "
                      "parameters has been deprecated. From now on please list your "
                      "derived parameters in a list as the value of %r in the "
                      "likelihood info (see documentation) and have your function "
                      "return a tuple `(logp, {derived_param_1: value_1, ...})`.",
            "output_params")
    # END OF DEPRECATION BLOCK
    if self.output_params:
        self.output_params = str_to_list(self.output_params) or []
    # Required quantities from other components
    self._uses_self_arg = self._self_arg in argspec.args
    if info.get("requires") and not self._uses_self_arg:
        raise LoggedError(
            self.log, "If a likelihood has external requirements, declared under %r, "
                      "it needs to accept a keyword argument %r.",
            "requires", self._self_arg)
    self._requirements = info.get("requires") or {}
    # MARKED FOR DEPRECATION IN v3.0
    if "_theory" in argspec.args:
        raise LoggedError(
            self.log, "The use of a `_theory` argument to deal with requirements has "
                      "been deprecated. From now on please indicate your requirements"
                      " as the value of field %r in the likelihood info (see "
                      "documentation) and have your function take a parameter "
                      "`_self`.", "requires")
    # END OF DEPRECATION BLOCK
    self._optional_args = [
        p for p, val in chain(
            zip(argspec.args[-len(argspec.defaults):], argspec.defaults)
            if argspec.defaults else [],
            (argspec.kwonlydefaults or {}).items())
        if p not in ignore_args and
        (isinstance(val, numbers.Number) or val is None)]
    self._args = set(chain(self._optional_args, self.params))
    if argspec.varkw:
        self._args.update(self.input_params)
    self.log.info("Initialized external likelihood.")
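# --- Illustrative sketch (not part of the original module) ---
# The constructor above wraps plain Python functions as likelihoods. A hedged
# example of the calling convention it inspects: positional arguments become
# input parameters, and an optional `_self` argument gives access to any
# declared requirements through `_self.provider`. The function below is
# hypothetical.
def _example_external_logp(x, y, _self=None):
    # A simple Gaussian log-likelihood in the two input parameters
    return -0.5 * (x ** 2 + y ** 2)

# Passed in the likelihood info as e.g. {"external": _example_external_logp}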
def __init__(self, info=empty_dict, name=None, timing=None, packages_path=None,
             initialize=True, standalone=True):
    self.delay = 0
    super().__init__(info, name=name, timing=timing, packages_path=packages_path,
                     initialize=initialize, standalone=standalone)
    # Make sure `type` is a list of data types, for aggregated chi2
    self.type = str_to_list(getattr(self, "type", []) or [])
def all_types(self):
    if not hasattr(self, "_all_types"):
        self._all_types = set(chain(
            *[str_to_list(getattr(self[like], "type", []) or []) for like in self]))
    return self._all_types
def get_requirements(self):
    """
    Get a dictionary of requirements that are always needed (e.g. must be
    calculated by another component or provided as input parameters).

    :return: dictionary of requirements (or iterable of requirement names if no
             optional parameters are needed)
    """
    return dict.fromkeys(str_to_list(getattr(self, _requires, [])))
def initialize(self):
    """Importing CAMB from the correct path, if given."""
    # Allow global import if no direct path specification
    allow_global = not self.path
    if not self.path and self.packages_path:
        self.path = self.get_path(self.packages_path)
    self.camb = self.is_installed(path=self.path, allow_global=allow_global,
                                  check=False)
    if not self.camb:
        raise NotInstalledError(
            self.log, "Could not find CAMB. Check error message above.")
    super().initialize()
    self.extra_attrs = {"Want_CMB": False, "Want_cl_2D_array": False,
                        'WantCls': False}
    # Derived parameters that may not have been requested, but will be necessary later
    self.derived_extra = []
    # Some default settings
    self.needs_perts = False
    self.limber = False
    self.non_linear_sources = False
    self.non_linear_pk = False
    self._base_params = None
    self._needs_lensing_cross = False
    self._sigmaR_z_indices = {}
    if self.external_primordial_pk:
        self.extra_args['initial_power_model'] = \
            self.camb.initialpower.SplinedInitialPower
        self.initial_power_args, self.power_params = {}, []
    else:
        power_spectrum = self.camb.CAMBparams.make_class_named(
            self.extra_args.get('initial_power_model',
                                self.camb.initialpower.InitialPowerLaw),
            self.camb.initialpower.InitialPower)
        self.initial_power_args, self.power_params = \
            self._extract_params(power_spectrum.set_params)
    nonlin = self.camb.CAMBparams.make_class_named(
        self.extra_args.get('non_linear_model', self.camb.nonlinear.Halofit),
        self.camb.nonlinear.NonLinearModel)
    self.nonlin_args, self.nonlin_params = self._extract_params(nonlin.set_params)
    self.requires = str_to_list(getattr(self, "requires", []))
    self._transfer_requires = [p for p in self.requires
                               if p not in self.get_can_support_params()]
    self.requires = [p for p in self.requires if p not in self._transfer_requires]
    self.log.info("Initialized!")
def get_requirements(self) -> Union[InfoDict, Sequence[str],
                                    Sequence[Tuple[str, InfoDict]]]:
    """
    Get a dictionary of requirements (or a list of requirement name, option tuples)
    that are always needed (e.g. must be calculated by another component or provided
    as input parameters).

    :return: dictionary or list of tuples of requirement names and options
             (or iterable of requirement names if no optional parameters are needed)
    """
    return str_to_list(getattr(self, "requires", []))
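# --- Illustrative sketch (not part of the original module) ---
# A hedged example of overriding get_requirements() in a subclass: either a
# dict of requirement names with options, or plain names, may be returned.
# The class and the requested quantities below are illustrative placeholders.
class _ExampleComponent(Theory):
    def get_requirements(self):
        # Request CMB Cl's up to ell=2500 and the Hubble constant
        return {"Cl": {"tt": 2500}, "H0": None}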
def post(info, sample=None):
    logger_setup(info.get(_debug), info.get(_debug_file))
    log = logging.getLogger(__name__.split(".")[-1])
    # MARKED FOR DEPRECATION IN v3.0
    # BEHAVIOUR TO BE REPLACED BY ERROR:
    check_deprecated_modules_path(info)
    # END OF DEPRECATION BLOCK
    try:
        info_post = info[_post]
    except KeyError:
        raise LoggedError(log, "No 'post' block given. Nothing to do!")
    if get_mpi_rank():
        log.warning("Post-processing is not yet MPI-aware. "
                    "Doing nothing for rank > 1 processes.")
        return
    if info.get(_resume):
        log.warning("Resuming not implemented for post-processing. Re-starting.")
    # 1. Load existing sample
    output_in = get_output(output_prefix=info.get(_output_prefix))
    if output_in:
        try:
            info_in = output_in.reload_updated_info()
        except FileNotFoundError:
            raise LoggedError(log, "Error loading input model: "
                                   "could not find input info at %s",
                              output_in.file_updated)
    else:
        info_in = deepcopy_where_possible(info)
    dummy_model_in = DummyModel(info_in[_params], info_in[kinds.likelihood],
                                info_in.get(_prior, None))
    if output_in:
        if not output_in.find_collections():
            raise LoggedError(log,
                              "No samples found for the input model with prefix %s",
                              os.path.join(output_in.folder, output_in.prefix))
        collection_in = output_in.load_collections(
            dummy_model_in, skip=info_post.get("skip", 0),
            thin=info_post.get("thin", 1), concatenate=True)
    elif sample:
        if isinstance(sample, Collection):
            sample = [sample]
        collection_in = deepcopy(sample[0])
        for s in sample[1:]:
            try:
                collection_in.append(s)
            except Exception:
                raise LoggedError(log, "Failed to load some of the input samples.")
    else:
        raise LoggedError(log, "No output from where to load from, "
                               "nor input collections given.")
    log.info("Will process %d samples.", len(collection_in))
    if len(collection_in) <= 1:
        raise LoggedError(
            log, "Not enough samples for post-processing. Try using a larger sample, "
                 "or skipping or thinning less.")
    # 2. Compare old and new info: determine what to do
    add = info_post.get(_post_add, {}) or {}
    remove = info_post.get(_post_remove, {})
    # Add a dummy 'one' likelihood, to absorb unused parameters
    if not add.get(kinds.likelihood):
        add[kinds.likelihood] = {}
    add[kinds.likelihood]["one"] = None
    # Expand the "add" info
    add = update_info(add)
    # 2.1 Adding/removing derived parameters and changes in priors of sampled params
    out = {_params: deepcopy_where_possible(info_in[_params])}
    for p in remove.get(_params, {}):
        pinfo = info_in[_params].get(p)
        if pinfo is None or not is_derived_param(pinfo):
            raise LoggedError(
                log,
                "You tried to remove parameter '%s', which is not a derived parameter. "
                "Only derived parameters can be removed during post-processing.", p)
        out[_params].pop(p)
    # Force recomputation of aggregated chi2
    for p in list(out[_params]):
        if p.startswith(_get_chi2_name("")):
            out[_params].pop(p)
    mlprior_names_add = []
    for p, pinfo in add.get(_params, {}).items():
        pinfo_in = info_in[_params].get(p)
        if is_sampled_param(pinfo):
            if not is_sampled_param(pinfo_in):
                # No added sampled parameters (de-marginalisation not implemented)
                if pinfo_in is None:
                    raise LoggedError(
                        log,
                        "You added a new sampled parameter %r (maybe accidentally "
                        "by adding a new likelihood that depends on it). "
                        "Adding new sampled parameters is not possible. Try fixing "
                        "it to some value.", p)
                else:
                    raise LoggedError(
                        log, "You tried to change the prior of parameter '%s', "
                             "but it was not a sampled parameter. To change that "
                             "prior, you need to define it as an external one.", p)
            if mlprior_names_add[:1] != [_minuslogprior + _separator + _prior_1d_name]:
                mlprior_names_add = ([_minuslogprior + _separator + _prior_1d_name] +
                                     mlprior_names_add)
        elif is_derived_param(pinfo):
            if p in out[_params]:
                raise LoggedError(
                    log, "You tried to add derived parameter '%s', which is already "
                         "present. To force its recomputation, 'remove' it too.", p)
        elif is_fixed_param(pinfo):
            # Only one possibility left: a "fixed" parameter that was not present
            # before, i.e. input of a new likelihood, or just an argument for a
            # dynamical derived (dropped) one
            if (p in info_in[_params] and
                    pinfo[partag.value] != (pinfo_in or {}).get(partag.value, None)):
                raise LoggedError(
                    log,
                    "You tried to add a fixed parameter '%s: %r' that was already "
                    "present but had a different value or was not fixed. This is not "
                    "allowed. The old info of the parameter was '%s: %r'",
                    p, dict(pinfo), p, dict(pinfo_in))
        else:
            raise LoggedError(log, "This should not happen. Contact the developers.")
        out[_params][p] = pinfo
    # For the likelihood only, turn the rest of *derived* parameters into constants,
    # so that the likelihoods do not try to compute them.
    # But be careful to exclude *input* params that have a "derived: True" value
    # (which in "updated info" turns into "derived: 'lambda [x]: [x]'")
    out_params_like = deepcopy_where_possible(out[_params])
    for p, pinfo in out_params_like.items():
        if (is_derived_param(pinfo) and partag.value not in pinfo and
                p not in add.get(_params, {})):
            out_params_like[p] = {partag.value: np.nan, partag.drop: True}
    # 2.2 Manage adding/removing priors and likelihoods
    warn_remove = False
    for level in [_prior, kinds.likelihood]:
        out[level] = getattr(dummy_model_in, level)
        if level == _prior:
            out[level].remove(_prior_1d_name)
        for pdf in info_post.get(_post_remove, {}).get(level, []) or []:
            try:
                out[level].remove(pdf)
                warn_remove = True
            except ValueError:
                raise LoggedError(
                    log, "Trying to remove %s '%s', but it is not present. "
                         "Existing ones: %r", level, pdf, out[level])
    if warn_remove:
        log.warning("You are removing a prior or likelihood pdf. "
                    "Notice that if the resulting posterior is much wider "
                    "than the original one, or displaced enough, "
                    "it is probably safer to explore it directly.")
    if _prior in add:
        mlprior_names_add += [_minuslogprior + _separator + name
                              for name in add[_prior]]
        out[_prior] += list(add[_prior])
    prior_recompute_1d = (
        mlprior_names_add[:1] == [_minuslogprior + _separator + _prior_1d_name])
    # Don't initialise the theory code if not adding/recomputing theory,
    # theory-derived params or likelihoods
    recompute_theory = info_in.get(kinds.theory) and not (
        list(add[kinds.likelihood]) == ["one"] and
        not any(is_derived_param(pinfo) for pinfo in add.get(_params, {}).values()))
    if recompute_theory:
        # Inherit from the original chain (needs input|output_params, renames, etc.)
        add_theory = add.get(kinds.theory)
        if add_theory:
            info_theory_out = {}
            if len(add_theory) > 1:
                log.warning('Importance sampling with more than one theory is '
                            'not really tested')
            add_theory = add_theory.copy()
            for theory, theory_info in info_in[kinds.theory].items():
                theory_copy = deepcopy_where_possible(theory_info)
                if theory in add_theory:
                    info_theory_out[theory] = \
                        recursive_update(theory_copy, add_theory.pop(theory))
                else:
                    info_theory_out[theory] = theory_copy
            info_theory_out.update(add_theory)
        else:
            info_theory_out = deepcopy_where_possible(info_in[kinds.theory])
    else:
        info_theory_out = None
    chi2_names_add = [_get_chi2_name(name) for name in add[kinds.likelihood]
                      if name != "one"]
    out[kinds.likelihood] += [l for l in add[kinds.likelihood] if l != "one"]
    if recompute_theory:
        log.warning("You are recomputing the theory, but in the current version this "
                    "does not force recomputation of any likelihood or derived "
                    "parameter, unless explicitly removed+added.")
    for level in [_prior, kinds.likelihood]:
        for i, x_i in enumerate(out[level]):
            if x_i in list(out[level])[i + 1:]:
                raise LoggedError(
                    log, "You have added %s '%s', which was already present. If you "
                         "want to force its recomputation, you must also 'remove' it.",
                    level, x_i)
    # 3. Create output collection
    if _post_suffix not in info_post:
        raise LoggedError(log, "You need to provide a '%s' for your chains.",
                          _post_suffix)
    # Use default prefix if it exists. If it does not, produce no output by default.
    # {post: {output: None}} suppresses output, and if it's a string, updates it.
    out_prefix = info_post.get(_output_prefix, info.get(_output_prefix))
    if out_prefix not in [None, False]:
        out_prefix += (_separator_files + _post + _separator_files +
                       info_post[_post_suffix])
    output_out = get_output(output_prefix=out_prefix, force=info.get(_force))
    if output_out and not output_out.force and output_out.find_collections():
        raise LoggedError(log, "Found existing post-processing output with prefix %r. "
                               "Delete it manually or re-run with `force: True` "
                               "(or `-f`, `--force` from the shell).", out_prefix)
    elif output_out and output_out.force:
        output_out.delete_infos()
        for regexp in output_out.find_collections():
            output_out.delete_with_regexp(re.compile(regexp))
    info_out = deepcopy_where_possible(info)
    info_out[_post] = info_post
    # Updated with input info and extended (updated) add info
    info_out.update(info_in)
    info_out[_post][_post_add] = add
    dummy_model_out = DummyModel(out[_params], out[kinds.likelihood],
                                 info_prior=out[_prior])
    if recompute_theory:
        # TODO: May need updating for more than one, or maybe can be removed
        theory = list(info_theory_out)[0]
        if _input_params not in info_theory_out[theory]:
            raise LoggedError(
                log,
                "You appear to be post-processing a chain generated with an older "
                "version of Cobaya. For post-processing to work, please edit the "
                "'[root].updated.yaml' file of the original chain to add, inside the "
                "theory code block, the list of its input parameters. E.g.\n----\n"
                "theory:\n  %s:\n    input_params: [param1, param2, ...]\n"
                "----\nIf you get strange errors later, it is likely that you did not "
                "specify the correct set of theory parameters.\n"
                "The full set of input parameters are %s.",
                theory, list(dummy_model_out.parameterization.input_params()))
    # TODO: check allow_renames=False?
    # TODO: May well be simplifications here, this is v close to pre-refactor logic
    #  Have not gone through or understood all the parameterization stuff
    model_add = Model(out_params_like, add[kinds.likelihood],
                      info_prior=add.get(_prior), info_theory=info_theory_out,
                      packages_path=info.get(_packages_path),
                      allow_renames=False, post=True,
                      prior_parameterization=dummy_model_out.parameterization)
    # Remove auxiliary "one" before dumping -- 'add' *is* info_out[_post][_post_add]
    add[kinds.likelihood].pop("one")
    collection_out = Collection(dummy_model_out, output_out, name="1")
    output_out.check_and_dump_info(None, info_out, check_compatible=False)
    # Prepare recomputation of aggregated chi2
    # (they need to be recomputed by hand, because their auto-computation won't pick
    #  up old likelihoods for a given type)
    all_types = {
        like: str_to_list(add[kinds.likelihood].get(
            like, info_in[kinds.likelihood].get(like)).get("type", []) or [])
        for like in out[kinds.likelihood]}
    types = set(chain(*list(all_types.values())))
    inv_types = {t: [like for like, like_types in all_types.items()
                     if t in like_types] for t in types}
    # 4. Main loop!
    log.info("Running post-processing...")
    last_percent = 0
    for i, point in collection_in.data.iterrows():
        log.debug("Point: %r", point)
        sampled = [point[param]
                   for param in dummy_model_in.parameterization.sampled_params()]
        derived = {param: point.get(param, None)
                   for param in dummy_model_out.parameterization.derived_params()}
        inputs = {
            param: point.get(
                param, dummy_model_in.parameterization.constant_params().get(
                    param, dummy_model_out.parameterization.constant_params().get(
                        param, None)))
            for param in dummy_model_out.parameterization.input_params()}
        # Solve inputs that depend on a function and were not saved
        # (we don't use the Parameterization_to_input method in case there are
        #  references to functions that cannot be loaded at the moment)
        for p, value in inputs.items():
            if value is None:
                func = dummy_model_out.parameterization._input_funcs[p]
                args = dummy_model_out.parameterization._input_args[p]
                inputs[p] = func(*[point.get(arg) for arg in args])
        # Add/remove priors
        priors_add = model_add.prior.logps(sampled)
        if not prior_recompute_1d:
            priors_add = priors_add[1:]
        logpriors_add = dict(zip(mlprior_names_add, priors_add))
        logpriors_new = [logpriors_add.get(name, -point.get(name, 0))
                         for name in collection_out.minuslogprior_names]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New set of priors: %r",
                      dict(zip(dummy_model_out.prior, logpriors_new)))
        if -np.inf in logpriors_new:
            continue
        # Add/remove likelihoods
        output_like = []
        if add[kinds.likelihood]:
            # Notice "one" (last in likelihood_add) is ignored: not in chi2_names
            loglikes_add, output_like = model_add.logps(inputs, return_derived=True)
            loglikes_add = dict(zip(chi2_names_add, loglikes_add))
            output_like = dict(zip(model_add.output_params, output_like))
        else:
            loglikes_add = dict()
        loglikes_new = [loglikes_add.get(name, -0.5 * point.get(name, 0))
                        for name in collection_out.chi2_names]
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New set of likelihoods: %r",
                      dict(zip(dummy_model_out.likelihood, loglikes_new)))
            if output_like:
                log.debug("New set of likelihood-derived parameters: %r", output_like)
        if -np.inf in loglikes_new:
            continue
        # Add/remove derived parameters and change priors of sampled parameters
        for p in add[_params]:
            if p in dummy_model_out.parameterization._directly_output:
                derived[p] = output_like[p]
            elif p in dummy_model_out.parameterization._derived_funcs:
                func = dummy_model_out.parameterization._derived_funcs[p]
                args = dummy_model_out.parameterization._derived_args[p]
                derived[p] = func(*[point.get(arg, output_like.get(arg, None))
                                    for arg in args])
        # We need to recompute the aggregated chi2 by hand
        for type_, likes in inv_types.items():
            derived[_get_chi2_name(type_)] = sum(
                [-2 * lvalue for lname, lvalue
                 in zip(collection_out.chi2_names, loglikes_new)
                 if _undo_chi2_name(lname) in likes])
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("New derived parameters: %r",
                      dict([(p, derived[p])
                            for p in dummy_model_out.parameterization.derived_params()
                            if p in add[_params]]))
        # Save to the collection (keep old weight for now)
        collection_out.add(sampled, derived=derived.values(),
                           weight=point.get(_weight),
                           logpriors=logpriors_new, loglikes=loglikes_new)
        # Display progress
        percent = np.round(i / len(collection_in) * 100)
        if percent != last_percent and not percent % 5:
            last_percent = percent
            progress_bar(log, percent, " (%d/%d)" % (i, len(collection_in)))
    if not collection_out.data.last_valid_index():
        raise LoggedError(
            log, "No elements in the final sample. Possible causes: "
                 "added a prior or likelihood valued zero over the full sampled "
                 "domain, or the computation of the theory failed everywhere, etc.")
    # Reweight -- account for large dynamic range!
    # Prefer to rescale +inf to finite, and ignore final points with -inf.
    # Remove -inf's (0-weight), and correct indices
    difflogmax = max(collection_in[_minuslogpost] - collection_out[_minuslogpost])
    collection_out.data[_weight] *= np.exp(
        collection_in[_minuslogpost] - collection_out[_minuslogpost] - difflogmax)
    collection_out.data = (
        collection_out.data[collection_out.data.weight > 0].reset_index(drop=True))
    collection_out._n = collection_out.data.last_valid_index() + 1
    # Write!
    collection_out.out_update()
    log.info("Finished! Final number of samples: %d", len(collection_out))
    return info_out, {"sample": collection_out}
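# --- Illustrative usage sketch (not part of the original module) ---
# How this post() might be driven programmatically. The chain prefix, suffix
# and the added likelihood are hypothetical placeholders.
def _example_post_run():
    info = {
        "output": "chains/run1",          # prefix of the existing chain
        "post": {
            "suffix": "cut",              # appended to the output prefix
            "skip": 300,                  # discard the first points as burn-in
            "add": {"likelihood": {"extra_like": "my_module.logp"}},
        },
    }
    updated_info, products = post(info)
    return products["sample"]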
def update_info(info):
    """
    Creates an updated info starting from the defaults for each component and
    updating it with the input info.
    """
    component_base_classes = get_base_classes()
    # Don't modify the original input, and convert all Mappings to consistent dicts
    input_info = deepcopy_where_possible(info)
    # Create an equivalent info using only the defaults
    updated_info = {}
    default_params_info = {}
    default_prior_info = {}
    components = get_used_components(input_info)
    from cobaya.component import CobayaComponent
    for block in components:
        updated = {}
        updated_info[block] = updated
        input_block = input_info[block]
        for component in components[block]:
            # Preprocess "no options" and "external function" in input
            try:
                input_block[component] = input_block[component] or {}
            except TypeError:
                raise LoggedError(
                    log, "Your input info is not well formatted at the '%s' block. "
                         "It must be a dictionary {'%s_i': {options}, ...}. ",
                    block, block)
            if isinstance(component, CobayaComponent) or \
                    isinstance(input_block[component], CobayaComponent):
                raise LoggedError(log, "Input for %s:%s should specify a class, not "
                                       "an instance", block, component)
                # TODO: allow instance passing?
                #  could allow this, but would have to sort out deepcopy
                # if input_block[component]:
                #     raise LoggedError(log, "Instances should be passed a dictionary "
                #                            "entry of the form 'instance: None'")
                # change_key(input_block, component, component.get_name(),
                #            {_external: component})
                # updated[component.get_name()] = \
                #     input_block[component.get_name()].copy()
                # continue
            if inspect.isclass(input_block[component]) or \
                    not isinstance(input_block[component], dict):
                input_block[component] = {_external: input_block[component]}
            ext = input_block[component].get(_external)
            if ext:
                if inspect.isclass(ext):
                    default_class_info = get_default_info(
                        ext, block, input_options=input_block[component])
                else:
                    default_class_info = deepcopy_where_possible(
                        component_base_classes[block].get_defaults())
            else:
                component_path = input_block[component].get(_component_path, None)
                default_class_info = get_default_info(
                    component, block,
                    class_name=input_block[component].get(_class_name),
                    component_path=component_path,
                    input_options=input_block[component])
            updated[component] = default_class_info or {}
            # Update default options with input info
            # Consistency is checked only up to first level! (i.e. subkeys may not match)
            # Reserved attributes not necessarily already in default info:
            reserved = {_external, _class_name, _provides, _requires, partag.renames,
                        _input_params, _output_params, _component_path, _aliases}
            options_not_recognized = (set(input_block[component])
                                      .difference(reserved)
                                      .difference(set(updated[component])))
            if options_not_recognized:
                alternatives = {}
                available = ({_external, _class_name, _requires, partag.renames}
                             .union(updated_info[block][component]))
                while options_not_recognized:
                    option = options_not_recognized.pop()
                    alternatives[option] = fuzzy_match(option, available, n=3)
                did_you_mean = ", ".join(
                    [("'%s' (did you mean %s?)" %
                      (o, "|".join(["'%s'" % _ for _ in a])) if a else "'%s'" % o)
                     for o, a in alternatives.items()])
                raise LoggedError(
                    log, "%s '%s' does not recognize some options: %s. "
                         "Check the documentation for '%s'.",
                    block, component, did_you_mean, block)
            updated[component].update(input_block[component])
            # Save params and priors of class to combine later
            default_params_info[component] = default_class_info.get(_params, {})
            default_prior_info[component] = default_class_info.get(_prior, {})
    # Add priors info, after the necessary checks
    if _prior in input_info or any(default_prior_info.values()):
        updated_info[_prior] = input_info.get(_prior, {})
        for prior_info in default_prior_info.values():
            for name, prior in prior_info.items():
                if updated_info[_prior].get(name, prior) != prior:
                    raise LoggedError(
                        log, "Two different priors cannot have the same name: '%s'.",
                        name)
                updated_info[_prior][name] = prior
    # Add parameters info, after the necessary updates and checks
    defaults_merged = merge_default_params_info(default_params_info)
    updated_info[_params] = merge_params_info(
        [defaults_merged, input_info.get(_params, {})], default_derived=False)
    # Add aggregated chi2 params
    if kinds.likelihood in info:
        all_types = set(chain(
            *[str_to_list(like_info.get("type", []) or [])
              for like_info in updated_info[kinds.likelihood].values()]))
        for t in all_types:
            updated_info[_params][_get_chi2_name(t)] = {
                partag.latex: _get_chi2_label(t), partag.derived: True}
    # Add automatically-defined parameters
    if _auto_params in updated_info:
        make_auto_params(updated_info.pop(_auto_params), updated_info[_params])
    # Add aliases for theory params (after merging!)
    for kind in [k for k in [kinds.theory, kinds.likelihood] if k in updated_info]:
        for item in updated_info[kind].values():
            renames = item.get(partag.renames)
            if renames:
                if not isinstance(renames, Mapping):
                    raise LoggedError(
                        log, "'renames' should be a dictionary of name mappings "
                             "(or you meant to use 'aliases')")
                renames_flat = [set([k] + str_to_list(v))
                                for k, v in renames.items()]
                for p in updated_info[_params]:
                    # Probably could be made faster by inverting the renames
                    # dicts *once*
                    renames_pairs = [a for a in renames_flat if p in a]
                    if renames_pairs:
                        this_renames = reduce(lambda x, y: x.union(y),
                                              [a for a in renames_flat if p in a])
                        updated_info[_params][p][partag.renames] = \
                            list(set(this_renames).union(set(str_to_list(
                                updated_info[_params][p].get(partag.renames, []))))
                                 .difference({p}))
    # Rest of the options
    for k, v in input_info.items():
        if k not in updated_info:
            updated_info[k] = v
    return updated_info
def update_info(info: _Dict, add_aggr_chi2=True) -> _Dict:
    """
    Creates an updated info starting from the defaults for each component and
    updating it with the input info.
    """
    component_base_classes = get_base_classes()
    # Don't modify the original input, and convert all Mappings to consistent dicts
    input_info = deepcopy_where_possible(info)
    # Create an equivalent info using only the defaults
    updated_info: _Dict = {}
    default_params_info = {}
    default_prior_info = {}
    used_kind_members = get_used_components(input_info)
    from cobaya.component import CobayaComponent
    for block in used_kind_members:
        updated: InfoDict = {}
        updated_info[block] = updated
        input_block = input_info[block]
        name: str
        for name in used_kind_members[block]:
            # Preprocess "no options" and "external function" in input
            try:
                input_block[name] = input_block[name] or {}
            except TypeError:
                raise LoggedError(
                    log, "Your input info is not well formatted at the '%s' block. "
                         "It must be a dictionary {'%s_i': {options}, ...}. ",
                    block, block)
            if isinstance(name, CobayaComponent) or isinstance(name, type):
                raise LoggedError(log, "Instances and classes should be passed a "
                                       "dictionary entry of the form 'name: instance'")
            if isinstance(input_block[name], CobayaComponent):
                log.warning("Support for input instances is experimental")
            if isinstance(input_block[name], type) or \
                    not isinstance(input_block[name], dict):
                input_block[name] = {"external": input_block[name]}
            ext = input_block[name].get("external")
            annotations = {}
            if ext:
                if isinstance(ext, type):
                    default_class_info, annotations = get_default_info(
                        ext, block, input_options=input_block[name],
                        return_undefined_annotations=True)
                else:
                    default_class_info = deepcopy_where_possible(
                        component_base_classes[block].get_defaults())
            else:
                component_path = input_block[name].get("python_path")
                default_class_info, annotations = get_default_info(
                    name, block, class_name=input_block[name].get("class"),
                    component_path=component_path, input_options=input_block[name],
                    return_undefined_annotations=True)
            updated[name] = default_class_info or {}
            # Update default options with input info
            # Consistency is checked only up to first level! (i.e. subkeys may not match)
            # Reserved attributes not necessarily already in default info:
            reserved = {"external", "class", "provides", "requires", "renames",
                        "input_params", "output_params", "python_path", "aliases"}
            options_not_recognized = set(input_block[name]).difference(
                chain(reserved, updated[name], annotations))
            if options_not_recognized:
                alternatives = {}
                available = ({"external", "class", "requires", "renames"}
                             .union(updated_info[block][name]))
                while options_not_recognized:
                    option = options_not_recognized.pop()
                    alternatives[option] = fuzzy_match(option, available, n=3)
                did_you_mean = ", ".join(
                    [("'%s' (did you mean %s?)" %
                      (o, "|".join(["'%s'" % _ for _ in a])) if a else "'%s'" % o)
                     for o, a in alternatives.items()])
                raise LoggedError(
                    log, "%s '%s' does not recognize some options: %s. "
                         "Check the documentation for '%s'.",
                    block, name, did_you_mean, block)
            updated[name].update(input_block[name])
            # Save params and priors of class to combine later
            default_params_info[name] = default_class_info.get("params", {})
            default_prior_info[name] = default_class_info.get("prior", {})
    # Add priors info, after the necessary checks
    if "prior" in input_info or any(default_prior_info.values()):
        updated_info["prior"] = input_info.get("prior", {})
        for prior_info in default_prior_info.values():
            for name, prior in prior_info.items():
                if updated_info["prior"].get(name, prior) != prior:
                    raise LoggedError(
                        log, "Two different priors cannot have the same name: '%s'.",
                        name)
                updated_info["prior"][name] = prior
    # Add parameters info, after the necessary updates and checks
    defaults_merged = merge_default_params_info(default_params_info)
    param_info: ExpandedParamsDict = merge_params_info(
        [defaults_merged, input_info.get("params", {})], default_derived=False)
    updated_info["params"] = param_info  # type: ignore
    # Add aggregated chi2 params
    if info.get("likelihood") and add_aggr_chi2:
        all_types = set(chain(
            *[str_to_list(like_info.get("type", []) or [])
              for like_info in updated_info["likelihood"].values()
              if like_info is not None]))
        add_aggregated_chi2_params(param_info, all_types)
    # Add automatically-defined parameters
    if "auto_params" in updated_info:
        make_auto_params(updated_info.pop("auto_params"), param_info)
    # Add aliases for theory params (after merging!)
    for name in ("theory", "likelihood"):
        if isinstance(updated_info.get(name), dict):
            for item in updated_info[name].values():
                renames = item.get("renames")
                if renames:
                    if not isinstance(renames, Mapping):
                        raise LoggedError(
                            log, "'renames' should be a dictionary of name mappings "
                                 "(or you meant to use 'aliases')")
                    renames_flat = [set([k] + str_to_list(v))
                                    for k, v in renames.items()]
                    for p in param_info:
                        # Probably could be made faster by inverting
                        # the renames dicts *once*
                        renames_pairs = [a for a in renames_flat if p in a]
                        if renames_pairs:
                            this_renames = reduce(
                                lambda x, y: x.union(y),
                                [a for a in renames_flat if p in a])
                            param_info[p]["renames"] = \
                                list(set(chain(this_renames, str_to_list(
                                    param_info[p].get("renames", []))))
                                     .difference({p}))
    # Rest of the options
    for k, v in input_info.items():
        if k not in updated_info:
            updated_info[k] = v
    return updated_info
def type_list(self) -> List[str]:
    # List of labels that classify this component;
    # not usually used for Theory, can be used for aggregated chi2 in likelihoods
    return str_to_list(getattr(self, "type", []) or [])
def initialize(self):
    """Importing CAMB from the correct path, if given."""
    if not self.path and self.packages_path:
        self.path = self.get_path(self.packages_path)
    camb_path = None
    if self.path and not os.path.exists(self.path):
        # Fail if this was a directly specified path,
        # or ignore and try to global-import if it came from a packages_path
        if self.packages_path:
            self.log.info("*local* CAMB not found at " + self.path)
            self.log.info("Importing *global* CAMB.")
        else:
            raise LoggedError(self.log, "*local* CAMB not found at " + self.path)
    elif self.path:
        self.log.info("Importing *local* CAMB from " + self.path)
        if not os.path.exists(self.path):
            raise LoggedError(self.log, "The given folder does not exist: '%s'",
                              self.path)
        camb_path = self.path
        if not os.path.exists(os.path.join(self.path, "setup.py")):
            raise LoggedError(
                self.log,
                "Either CAMB is not in the given folder, '%s', or you are using a "
                "very old version without the Python interface.", self.path)
    else:
        self.log.info("Importing *global* CAMB.")
    try:
        self.camb = load_module("camb", path=camb_path,
                                min_version=self._min_camb_version)
    except ImportError:
        raise LoggedError(
            self.log, "Couldn't find the CAMB python interface.\n"
                      "Make sure that you have compiled it, and that you either\n"
                      " (a) specify a path (you didn't) or\n"
                      " (b) install the Python interface globally with\n"
                      "     'python -m pip install -e /path/to/camb [--user]'")
    except VersionCheckError as e:
        raise LoggedError(self.log, str(e))
    super().initialize()
    self.extra_attrs = {"Want_CMB": False, "Want_cl_2D_array": False,
                        'WantCls': False}
    # Derived parameters that may not have been requested, but will be necessary later
    self.derived_extra = []
    # Some default settings
    self.needs_perts = False
    self.limber = False
    self.non_linear_sources = False
    self.non_linear_pk = False
    self._base_params = None
    self._needs_lensing_cross = False
    self._sigmaR_z_indices = {}
    if self.external_primordial_pk:
        self.extra_args['initial_power_model'] = \
            self.camb.initialpower.SplinedInitialPower
        self.initial_power_args, self.power_params = {}, []
    else:
        power_spectrum = self.camb.CAMBparams.make_class_named(
            self.extra_args.get('initial_power_model',
                                self.camb.initialpower.InitialPowerLaw),
            self.camb.initialpower.InitialPower)
        self.initial_power_args, self.power_params = \
            self._extract_params(power_spectrum.set_params)
    nonlin = self.camb.CAMBparams.make_class_named(
        self.extra_args.get('non_linear_model', self.camb.nonlinear.Halofit),
        self.camb.nonlinear.NonLinearModel)
    self.nonlin_args, self.nonlin_params = self._extract_params(nonlin.set_params)
    self.requires = str_to_list(getattr(self, _requires, []))
    self._transfer_requires = [p for p in self.requires
                               if p not in self.get_can_support_params()]
    self.requires = [p for p in self.requires if p not in self._transfer_requires]
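# --- Illustrative sketch (not part of the original module) ---
# The path resolution above corresponds to theory-block input such as the
# following (the CAMB checkout path is a placeholder):
_example_camb_info = {
    "theory": {
        "camb": {
            # import *local* CAMB from this folder;
            # if unset, a *global* `import camb` is attempted, optionally
            # after resolving a path under `packages_path`
            "path": "/path/to/CAMB",
        }
    }
}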
def __init__(self, info_params: Union[ParamsDict, ExpandedParamsDict],
             allow_renames=True, ignore_unused_sampled=False):
    self.set_logger()
    self.allow_renames = allow_renames
    # First, we load the parameters,
    # not caring about whether they are understood by any likelihood.
    # `input` contains the parameters (expected to be) understood by the likelihood,
    # with its fixed value, its fixing function, or None if their value is given
    # directly by the sampler.
    self._infos = {}
    self._input: ParamValuesDict = {}
    self._input_funcs = {}
    self._input_args = {}
    self._input_dependencies: Dict[str, Set[str]] = {}
    self._dropped: Set[str] = set()
    self._output: ParamValuesDict = {}
    self._constant: ParamValuesDict = {}
    self._sampled: ParamValuesDict = {}
    self._sampled_renames: Dict[str, List[str]] = {}
    self._derived: ParamValuesDict = {}
    self._derived_inputs = []
    self._derived_funcs = {}
    self._derived_args = {}
    self._derived_dependencies: Dict[str, Set[str]] = {}
    # Notice here that expand_info_param *always* adds a "derived": True tag
    # to infos without "prior" or "value", and a "value" field to fixed params
    for p, info in info_params.items():
        if isinstance(info, Mapping) and not set(info).issubset(partags):
            raise LoggedError(self.log, "Parameter '%s' has unknown options %s",
                              p, set(info).difference(partags))
        info = expand_info_param(info)
        self._infos[p] = info
        if is_fixed_or_function_param(info):
            if isinstance(info["value"], Real):
                self._constant[p] = float(info["value"])
                self._input[p] = self._constant[p]
                if info.get("drop"):
                    self._dropped.add(p)
            else:
                self._input[p] = np.nan
                self._input_funcs[p] = get_external_function(info["value"])
                self._input_args[p] = getfullargspec(self._input_funcs[p]).args
        if is_sampled_param(info):
            self._sampled[p] = np.nan
            self._input[p] = np.nan
            if info.get("drop"):
                self._dropped.add(p)
            self._sampled_renames[p] = str_to_list(info.get("renames") or [])
        if is_derived_param(info):
            self._derived[p] = np.nan
            # Dynamical parameters whose value we want to save
            if info["derived"] is True and is_fixed_or_function_param(info):
                # parameters that are already known or computed by input funcs
                self._derived_inputs.append(p)
            elif info["derived"] is True:
                self._output[p] = np.nan
            else:
                self._derived_funcs[p] = get_external_function(info["derived"])
                self._derived_args[p] = getfullargspec(self._derived_funcs[p]).args
    # Check that the sampled and derived params are all valid python variable names
    for p in chain(self._sampled, self._derived):
        if not is_valid_variable_name(p):
            is_in = p in self._sampled
            eg_in = "  p_prime:\n    prior: ...\n  %s: " \
                    "'lambda p_prime: p_prime'\n" % p
            eg_out = "  p_prime: 'lambda %s: %s'\n" % (p, p)
            raise LoggedError(
                self.log,
                "Parameter name '%s' is not a valid Python variable name "
                "(it needs to start with a letter or '_').\n"
                "If this is an %s parameter of a likelihood or theory, "
                "whose name you cannot change,%s define an associated "
                "%s one with a valid name 'p_prime' as: \n\n%s",
                p, "input" if is_in else "output",
                "" if is_in else " remove it and",
                "sampled" if is_in else "derived",
                eg_in if is_in else eg_out)
    # Input params depend on input and sampled only,
    # never on output/derived unless constant
    known_input = set(self._input)
    all_input_arguments = set(chain(*self._input_args.values()))
    bad_input_dependencies = all_input_arguments - known_input
    if bad_input_dependencies:
        raise LoggedError(
            self.log,
            "Input parameters defined as functions can only depend on other "
            "input parameters. In particular, an input parameter cannot depend "
            "on %r. Use an explicit Theory calculator for more complex "
            "dependencies.\n"
            "If you intended to define a derived output parameter use 'derived:' "
            "instead of 'value:'", list(bad_input_dependencies))
    # Assume that the *un*known function arguments are likelihood/theory
    # output parameters
    for arg in (all_input_arguments.union(*self._derived_args.values())
                .difference(known_input).difference(self._derived)):
        self._output[arg] = np.nan
    # Useful set: directly "output-ed" derived
    self._directly_output = [p for p in self._derived if p in self._output]
    self._wrapped_input_funcs, self._wrapped_derived_funcs = \
        self._get_wrapped_functions_evaluation_order()
    # Useful mapping: input params that vary if each sampled param is varied
    self._sampled_input_dependence = {
        s: [i for i in self._input if s in self._input_dependencies.get(i, {})]
        for s in self._sampled}
    # From here on, some error control.
    # Only actually raise error after checking if used by prior.
    if not ignore_unused_sampled:
        self._dropped_not_directly_used = self._dropped.intersection(
            p for p, v in self._sampled_input_dependence.items() if not v)
    else:
        self._dropped_not_directly_used = set()
    # Warn if there are repeated parameter labels
    labels_inv_repeated = invert_dict(self.labels())
    labels_inv_repeated = {k: v for k, v in labels_inv_repeated.items()
                           if len(v) > 1}
    if labels_inv_repeated:
        self.log.warning("There are repeated parameter labels: %r",
                         labels_inv_repeated)
def initial_proposal_covmat(self, auto_params=None):
    """
    Build the initial covariance matrix, using the data provided, in descending
    order of priority:

    1. "covmat" field in the sampler block (including `auto` search).
    2. "proposal" field for each parameter.
    3. variance of the reference pdf.
    4. variance of the prior pdf.

    The covariances between parameters when both are present in a covariance
    matrix provided through option 1 are preserved. All other covariances are
    assumed 0.

    If `covmat: auto`, use the keyword `auto_params` to restrict the parameters
    for which a covariance matrix is searched (default: None, meaning all sampled
    params).
    """
    params_infos = self.model.parameterization.sampled_params_info()
    covmat = np.diag([np.nan] * len(params_infos))
    # Try to generate it automatically
    self.covmat = getattr(self, 'covmat', None)
    if isinstance(self.covmat, str) and self.covmat.lower() == "auto":
        params_infos_covmat = deepcopy_where_possible(params_infos)
        for p in list(params_infos_covmat):
            if p not in (auto_params or []):
                params_infos_covmat.pop(p, None)
        auto_covmat = self.model.get_auto_covmat(params_infos_covmat,
                                                 random_state=self._rng)
        if auto_covmat:
            self.covmat = os.path.join(auto_covmat["folder"], auto_covmat["name"])
            self.log.info("Covariance matrix selected automatically: %s", self.covmat)
        else:
            self.covmat = None
            self.log.info("Could not automatically find a good covmat. "
                          "Will generate from parameter info (proposal and prior).")
    # If given, load and test the covariance matrix
    loaded_params: Sequence[str]
    if isinstance(self.covmat, str):
        covmat_pre = "{%s}" % packages_path_input
        if self.covmat.startswith(covmat_pre):
            self.covmat = self.covmat.format(
                **{packages_path_input: self.packages_path}).replace("/", os.sep)
        try:
            with open(self.covmat, "r", encoding="utf-8-sig") as file_covmat:
                header = file_covmat.readline()
            loaded_covmat = np.loadtxt(self.covmat)
            self.log.debug(f"Loaded a covariance matrix from '{self.covmat}'")
        except TypeError:
            raise LoggedError(self.log, "The property 'covmat' must be a file name, "
                                        "but it's '%s'.", str(self.covmat))
        except IOError:
            raise LoggedError(self.log, "Can't open covmat file '%s'.", self.covmat)
        if header[0] != "#":
            raise LoggedError(
                self.log, "The first line of the covmat file '%s' "
                          "must be one list of parameter names separated by spaces "
                          "and starting with '#', and the rest must be a square "
                          "matrix, with one row per line.", self.covmat)
        loaded_params = header.strip("#").strip().split()
    elif hasattr(self.covmat, "__getitem__"):
        if not self.covmat_params:
            raise LoggedError(
                self.log, "If a covariance matrix is passed as a numpy array, "
                          "you also need to pass the parameters it corresponds to "
                          "via 'covmat_params: [name1, name2, ...]'.")
        loaded_params = self.covmat_params
        loaded_covmat = np.array(self.covmat)
    elif self.covmat:
        raise LoggedError(self.log, "Invalid covmat")
    if self.covmat is not None:
        str_msg = "the `covmat_params` list"
        if isinstance(self.covmat, str):
            str_msg = "the header of the covmat file %r" % self.covmat
        if len(loaded_params) != len(set(loaded_params)):
            duplicated = list(set(
                p for p in loaded_params if list(loaded_params).count(p) > 1))
            raise LoggedError(self.log, "Parameter(s) %r appear more than once in %s",
                              duplicated, str_msg)
        if len(loaded_params) != loaded_covmat.shape[0]:
            raise LoggedError(
                self.log, "The number of parameters in %s and the "
                          "dimensions of the matrix do not agree: %d vs %r",
                str_msg, len(loaded_params), loaded_covmat.shape)
        loaded_covmat = np.atleast_2d(loaded_covmat)
        is_square_symmetric = (
            len(loaded_covmat.shape) == 2 and
            loaded_covmat.shape[0] == loaded_covmat.shape[1] and
            np.allclose(loaded_covmat.T, loaded_covmat))
        # Not checking for positive-definiteness yet: may contain highly degenerate
        # derived parameters that would spoil it now, but will later be dropped.
        if not is_square_symmetric:
            from_msg = (f"loaded from '{self.covmat}'"
                        if isinstance(self.covmat, str) else "passed")
            raise LoggedError(
                self.log,
                f"The covariance matrix {from_msg} is not a symmetric square matrix.")
        # Fill with parameters in the loaded covmat
        renames = {p: [p] + str_to_list(v.get("renames") or [])
                   for p, v in params_infos.items()}
        indices_used, indices_sampler = zip(*[
            [loaded_params.index(p),
             [list(params_infos).index(q) for q, a in renames.items() if p in a]]
            for p in loaded_params])
        if not any(indices_sampler):
            raise LoggedError(
                self.log,
                "A proposal covariance matrix has been loaded, but none of its "
                "parameters are actually sampled here. Maybe a mismatch between "
                "parameter names in the covariance matrix and the input file?")
        indices_used, indices_sampler = zip(*[
            [i, j] for i, j in zip(indices_used, indices_sampler) if j])
        if any(len(j) - 1 for j in indices_sampler):
            first = next(j for j in indices_sampler if len(j) > 1)
            raise LoggedError(
                self.log,
                "The parameters %s have duplicated aliases. Can't assign them an "
                "element of the covariance matrix unambiguously.",
                ", ".join([list(params_infos)[i] for i in first]))
        indices_sampler = tuple(chain(*indices_sampler))
        covmat[np.ix_(indices_sampler, indices_sampler)] = \
            loaded_covmat[np.ix_(indices_used, indices_used)]
        self.log.info("Covariance matrix loaded for params %r",
                      [list(params_infos)[i] for i in indices_sampler])
        missing_params = set(params_infos).difference(
            list(params_infos)[i] for i in indices_sampler)
        if missing_params:
            self.log.info("Missing proposal covariance for params %r",
                          [p for p in self.model.parameterization.sampled_params()
                           if p in missing_params])
        else:
            self.log.info("All parameters' covariance loaded from given covmat.")
    # Fill gaps with "proposal" property, if present, otherwise ref (or prior)
    where_nan = np.isnan(covmat.diagonal())
    if np.any(where_nan):
        covmat[where_nan, where_nan] = np.array(
            [(info.get("proposal", np.nan) or np.nan) ** 2
             for info in params_infos.values()])[where_nan]
    where_nan2 = np.isnan(covmat.diagonal())
    if np.any(where_nan2):
        # The variances are likely too large for a good proposal, e.g. conditional
        # widths may be much smaller than the marginalized ones.
        # Divide by 4: better to be too small than too large.
        covmat[where_nan2, where_nan2] = (
            self.model.prior.reference_variances()[where_nan2] /
            self.fallback_covmat_scale)
    assert not np.any(np.isnan(covmat))
    return covmat, where_nan
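# --- Illustrative sketch (not part of the original module) ---
# The covmat file format expected above: a '#'-prefixed header listing the
# parameter names, then a square matrix with one row per line. A minimal
# writer (file name and values are placeholders):
def _example_write_covmat(path="example.covmat"):
    import numpy as np
    params = ["omegabh2", "omegach2"]
    cov = np.array([[1.2e-7, -4.0e-8],
                    [-4.0e-8, 3.1e-6]])
    # np.savetxt prefixes the header with '# ' by default
    np.savetxt(path, cov, header=" ".join(params))
    return path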
def post(info_or_yaml_or_file: Union[InputDict, str, os.PathLike], sample: Union[SampleCollection, List[SampleCollection], None] = None ) -> PostTuple: info = load_input_dict(info_or_yaml_or_file) logger_setup(info.get("debug"), info.get("debug_file")) log = get_logger(__name__) # MARKED FOR DEPRECATION IN v3.0 if info.get("modules"): raise LoggedError(log, "The input field 'modules' has been deprecated." "Please use instead %r", packages_path_input) # END OF DEPRECATION BLOCK info_post: PostDict = info.get("post") or {} if not info_post: raise LoggedError(log, "No 'post' block given. Nothing to do!") if mpi.is_main_process() and info.get("resume"): log.warning("Resuming not implemented for post-processing. Re-starting.") if not info.get("output") and info_post.get("output") \ and not info.get("params"): raise LoggedError(log, "The input dictionary must have be a full option " "dictionary, or have an existing 'output' root to load " "previous settings from ('output' to read from is in the " "main block not under 'post'). ") # 1. Load existing sample output_in = get_output(prefix=info.get("output")) if output_in: info_in = output_in.load_updated_info() or update_info(info) else: info_in = update_info(info) params_in: ExpandedParamsDict = info_in["params"] # type: ignore dummy_model_in = DummyModel(params_in, info_in.get("likelihood", {}), info_in.get("prior")) in_collections = [] thin = info_post.get("thin", 1) skip = info_post.get("skip", 0) if info.get('thin') is not None or info.get('skip') is not None: # type: ignore raise LoggedError(log, "'thin' and 'skip' should be " "parameters of the 'post' block") if sample: # If MPI, assume for each MPI process post is passed in the list of # collections that should be processed by that process # (e.g. single chain output from sampler) if isinstance(sample, SampleCollection): in_collections = [sample] else: in_collections = sample for i, collection in enumerate(in_collections): if skip: if 0 < skip < 1: skip = int(round(skip * len(collection))) collection = collection.filtered_copy(slice(skip, None)) if thin != 1: collection = collection.thin_samples(thin) in_collections[i] = collection elif output_in: files = output_in.find_collections() numbered = files if not numbered: # look for un-numbered output files files = output_in.find_collections(name=False) if files: if mpi.size() > len(files): raise LoggedError(log, "Number of MPI processes (%s) is larger than " "the number of sample files (%s)", mpi.size(), len(files)) for num in range(mpi.rank(), len(files), mpi.size()): in_collections += [SampleCollection( dummy_model_in, output_in, onload_thin=thin, onload_skip=skip, load=True, file_name=files[num], name=str(num + 1) if numbered else "")] else: raise LoggedError(log, "No samples found for the input model with prefix %s", os.path.join(output_in.folder, output_in.prefix)) else: raise LoggedError(log, "No output from where to load from, " "nor input collections given.") if any(len(c) <= 1 for c in in_collections): raise LoggedError( log, "Not enough samples for post-processing. Try using a larger sample, " "or skipping or thinning less.") mpi.sync_processes() log.info("Will process %d sample points.", sum(len(c) for c in in_collections)) # 2. 
Compare old and new info: determine what to do add = info_post.get("add") or {} if "remove" in add: raise LoggedError(log, "remove block should be under 'post', not 'add'") remove = info_post.get("remove") or {} # Add a dummy 'one' likelihood, to absorb unused parameters if not add.get("likelihood"): add["likelihood"] = {} add["likelihood"]["one"] = None # Expand the "add" info, but don't add new default sampled parameters orig_params = set(add.get("params") or []) add = update_info(add, add_aggr_chi2=False) add_params: ExpandedParamsDict = add["params"] # type: ignore for p in set(add_params) - orig_params: if p in params_in: add_params.pop(p) # 2.1 Adding/removing derived parameters and changes in priors of sampled parameters out_combined_params = deepcopy_where_possible(params_in) remove_params = list(str_to_list(remove.get("params")) or []) for p in remove_params: pinfo = params_in.get(p) if pinfo is None or not is_derived_param(pinfo): raise LoggedError( log, "You tried to remove parameter '%s', which is not a derived parameter. " "Only derived parameters can be removed during post-processing.", p) out_combined_params.pop(p) # Force recomputation of aggregated chi2 for p in list(out_combined_params): if p.startswith(get_chi2_name("")): out_combined_params.pop(p) prior_recompute_1d = False for p, pinfo in add_params.items(): pinfo_in = params_in.get(p) if is_sampled_param(pinfo): if not is_sampled_param(pinfo_in): # No added sampled parameters (de-marginalisation not implemented) if pinfo_in is None: raise LoggedError( log, "You added a new sampled parameter %r (maybe accidentally " "by adding a new likelihood that depends on it). " "Adding new sampled parameters is not possible. Try fixing " "it to some value.", p) else: raise LoggedError( log, "You tried to change the prior of parameter '%s', " "but it was not a sampled parameter. " "To change that prior, you need to define as an external one.", p) # recompute prior if potentially changed sampled parameter priors prior_recompute_1d = True elif is_derived_param(pinfo): if p in out_combined_params: raise LoggedError( log, "You tried to add derived parameter '%s', which is already " "present. To force its recomputation, 'remove' it too.", p) elif is_fixed_or_function_param(pinfo): # Only one possibility left "fixed" parameter that was not present before: # input of new likelihood, or just an argument for dynamical derived (dropped) if pinfo_in and p in params_in and pinfo["value"] != pinfo_in.get("value"): raise LoggedError( log, "You tried to add a fixed parameter '%s: %r' that was already present" " but had a different value or was not fixed. This is not allowed. " "The old info of the parameter was '%s: %r'", p, dict(pinfo), p, dict(pinfo_in)) elif not pinfo_in: # OK as long as we have known value for it raise LoggedError(log, "Parameter %s no known value. ", p) out_combined_params[p] = pinfo out_combined: InputDict = {"params": out_combined_params} # type: ignore # Turn the rest of *derived* parameters into constants, # so that the likelihoods do not try to recompute them # But be careful to exclude *input* params that have a "derived: True" value # (which in "updated info" turns into "derived: 'lambda [x]: [x]'") # Don't assign to derived parameters to theories, only likelihoods, so they can be # recomputed if needed. 
    # Turn the rest of the *derived* parameters into constants,
    # so that the likelihoods do not try to recompute them.
    # But be careful to exclude *input* params that have a "derived: True" value
    # (which in "updated info" turns into "derived: 'lambda [x]: [x]'").
    # Don't assign derived parameters to theories, only to likelihoods, so they can
    # be recomputed if needed. If the theory does not need to be computed, it doesn't
    # matter if it is already assigned parameters in the usual way; likelihoods can
    # get the required derived parameters from the stored sample's derived parameter
    # inputs.
    out_params_with_computed = deepcopy_where_possible(out_combined_params)
    dropped_theory = set()
    for p, pinfo in out_params_with_computed.items():
        if (is_derived_param(pinfo) and "value" not in pinfo
                and p not in add_params):
            out_params_with_computed[p] = {"value": np.nan}
            dropped_theory.add(p)
    # 2.2 Manage adding/removing priors and likelihoods
    warn_remove = False
    kind: ModelBlock
    for kind in ("prior", "likelihood", "theory"):
        out_combined[kind] = deepcopy_where_possible(info_in.get(kind)) or {}
        for remove_item in str_to_list(remove.get(kind)) or []:
            try:
                out_combined[kind].pop(remove_item)
            except KeyError:
                raise LoggedError(
                    log, "Trying to remove %s '%s', but it is not present. "
                         "Existing ones: %r",
                    kind, remove_item, list(out_combined[kind]))
            if remove_item not in (add.get(kind) or []) and kind != "theory":
                warn_remove = True
        if kind != "theory" and kind in add:
            dups = set(add.get(kind) or []).intersection(out_combined[kind]) - {"one"}
            if dups:
                raise LoggedError(
                    log, "You have added %s '%s', which was already present. If you "
                         "want to force its recomputation, you must also 'remove' it.",
                    kind, dups)
            out_combined[kind].update(add[kind])
    if warn_remove and mpi.is_main_process():
        log.warning("You are removing a prior or likelihood pdf. "
                    "Notice that if the resulting posterior is much wider "
                    "than the original one, or displaced enough, "
                    "it is probably safer to explore it directly.")
    mlprior_names_add = minuslogprior_names(add.get("prior") or [])
    chi2_names_add = [get_chi2_name(name) for name in add["likelihood"]
                      if name != "one"]
    out_combined["likelihood"].pop("one", None)
    add_theory = add.get("theory")
    if add_theory:
        if len(add["likelihood"]) == 1 and not any(
                is_derived_param(pinfo) for pinfo in add_params.values()):
            log.warning("You are adding a theory, but this does not force recomputation "
                        "of any likelihood or derived parameters unless explicitly "
                        "removed+added.")
        # Inherit from the original chain (input|output_params, renames, etc.)
        added_theory = add_theory.copy()
        for theory, theory_info in out_combined["theory"].items():
            if theory in list(added_theory):
                out_combined["theory"][theory] = \
                    recursive_update(theory_info, added_theory.pop(theory))
        out_combined["theory"].update(added_theory)
    # Prepare recomputation of the aggregated chi2's
    # (they need to be recomputed by hand, because automatic computation won't pick
    #  up old likelihoods of a given type)
    all_types = {like: str_to_list(opts.get("type") or [])
                 for like, opts in out_combined["likelihood"].items()}
    types = set(chain(*all_types.values()))
    inv_types = {t: [like for like, like_types in all_types.items()
                     if t in like_types] for t in sorted(types)}
    add_aggregated_chi2_params(out_combined_params, types)
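    # E.g. if two of the combined likelihoods (say, hypothetical "like_a" and
    # "like_b") both declare ``type: CMB``, the mapping built above is
    # ``inv_types == {"CMB": ["like_a", "like_b"]}``, and an aggregated derived
    # parameter, named by ``get_chi2_name("CMB")``, is added, to be filled in the
    # main loop below with the sum of their individual chi2 values.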
    # 3. Create output collection
    # Use the default prefix if it exists; if it does not, produce no output by
    # default. {post: {output: None}} suppresses output; if a string, it updates it.
    out_prefix = info_post.get("output", info.get("output"))
    if out_prefix:
        suffix = info_post.get("suffix")
        if not suffix:
            raise LoggedError(log, "You need to provide a '%s' for your output chains.",
                              "suffix")
        out_prefix += separator_files + "post" + separator_files + suffix
    output_out = get_output(prefix=out_prefix, force=info.get("force"))
    output_out.set_lock()
    if output_out and not output_out.force and output_out.find_collections():
        raise LoggedError(log, "Found existing post-processing output with prefix %r. "
                               "Delete it manually or re-run with `force: True` "
                               "(or `-f`, `--force` from the shell).", out_prefix)
    elif output_out and output_out.force and mpi.is_main_process():
        output_out.delete_infos()
        for _file in output_out.find_collections():
            output_out.delete_file_or_folder(_file)
    info_out = deepcopy_where_possible(info)
    info_post = info_post.copy()
    info_out["post"] = info_post
    # Updated with input info and extended (updated) add info
    info_out.update(info_in)  # type: ignore
    info_post["add"] = add
    dummy_model_out = DummyModel(out_combined_params, out_combined["likelihood"],
                                 info_prior=out_combined["prior"])
    out_func_parameterization = Parameterization(out_params_with_computed)
    # TODO: check allow_renames=False?
    model_add = Model(out_params_with_computed, add["likelihood"],
                      info_prior=add.get("prior"),
                      info_theory=out_combined["theory"],
                      packages_path=(info_post.get(packages_path_input) or
                                     info.get(packages_path_input)),
                      allow_renames=False, post=True,
                      stop_at_error=info.get('stop_at_error', False),
                      skip_unused_theories=True,
                      dropped_theory_params=dropped_theory)
    # Remove the auxiliary "one" before dumping -- 'add' *is* info_out["post"]["add"]
    add["likelihood"].pop("one")
    out_collections = [
        SampleCollection(dummy_model_out, output_out, name=c.name,
                         cache_size=OutputOptions.default_post_cache_size)
        for c in in_collections]
    # TODO: should maybe add skip/thin to out_combined, so can tell post-processed?
    output_out.check_and_dump_info(info_out, out_combined, check_compatible=False)
    collection_in = in_collections[0]
    collection_out = out_collections[0]
    last_percent = None
    known_constants = dummy_model_out.parameterization.constant_params()
    known_constants.update(dummy_model_in.parameterization.constant_params())
    missing_params = dummy_model_in.parameterization.sampled_params().keys() - set(
        collection_in.columns)
    if missing_params:
        raise LoggedError(log, "Input samples do not contain the expected sampled "
                               "parameter values: %s", missing_params)
    missing_priors = set(name for name in collection_out.minuslogprior_names
                         if name not in mlprior_names_add
                         and name not in collection_in.columns)
    if _minuslogprior_1d_name in missing_priors:
        prior_recompute_1d = True
    if prior_recompute_1d:
        missing_priors.discard(_minuslogprior_1d_name)
        mlprior_names_add.insert(0, _minuslogprior_1d_name)
    prior_regenerate: Optional[Prior]
    if missing_priors and "prior" in info_in:
        # In case there are input priors that are not stored in the input samples,
        # e.g. when post-processing GetDist/CosmoMC-format chains
        in_names = minuslogprior_names(info_in["prior"])
        info_prior = {piname: inf for (piname, inf), in_name
                      in zip(info_in["prior"].items(), in_names)
                      if in_name in missing_priors}
        regenerated_prior_names = minuslogprior_names(info_prior)
        missing_priors.difference_update(regenerated_prior_names)
        prior_regenerate = Prior(dummy_model_in.parameterization, info_prior)
    else:
        prior_regenerate = None
        regenerated_prior_names = None
    if missing_priors:
        raise LoggedError(log, "Missing priors: %s", missing_priors)
    mpi.sync_processes()
    output_in.check_lock()
    # 4. Main loop! Loop over input samples and adjust as required.
    if mpi.is_main_process():
        log.info("Running post-processing...")
    difflogmax: Optional[float] = None
    to_do = sum(len(c) for c in in_collections)
    weights = []
    done = 0
    last_dump_time = time.time()
    for collection_in, collection_out in zip(in_collections, out_collections):
        importance_weights = []

        def set_difflogmax():
            nonlocal difflogmax
            difflog = (collection_in[OutPar.minuslogpost].to_numpy(
                dtype=np.float64)[:len(collection_out)]
                       - collection_out[OutPar.minuslogpost].to_numpy(dtype=np.float64))
            difflogmax = np.max(difflog)
            if abs(difflogmax) < 1:
                difflogmax = 0  # keep it simple when e.g. the posteriors are very similar
            log.debug("difflogmax: %g", difflogmax)
            if mpi.more_than_one_process():
                difflogmax = max(mpi.allgather(difflogmax))
            if mpi.is_main_process():
                log.debug("Set difflogmax: %g", difflogmax)
            _weights = np.exp(difflog - difflogmax)
            importance_weights.extend(_weights)
            collection_out.reweight(_weights)

        for i, point in collection_in.data.iterrows():
            all_params = point.to_dict()
            for p in remove_params:
                all_params.pop(p, None)
            log.debug("Point: %r", point)
            sampled = np.array([all_params[param] for param in
                                dummy_model_in.parameterization.sampled_params()])
            all_params = out_func_parameterization.to_input(all_params).copy()
            # Add/remove priors
            if prior_recompute_1d:
                priors_add = [model_add.prior.logps_internal(sampled)]
                if priors_add[0] == -np.inf:
                    continue
            else:
                priors_add = []
            if model_add.prior.external:
                priors_add.extend(model_add.prior.logps_external(all_params))
            logpriors_add = dict(zip(mlprior_names_add, priors_add))
            logpriors_new = [logpriors_add.get(name, - point.get(name, 0))
                             for name in collection_out.minuslogprior_names]
            if prior_regenerate:
                regenerated = dict(zip(regenerated_prior_names,
                                       prior_regenerate.logps_external(all_params)))
                for _i, name in enumerate(collection_out.minuslogprior_names):
                    if name in regenerated_prior_names:
                        logpriors_new[_i] = regenerated[name]
            if is_debug(log):
                log.debug("New set of priors: %r",
                          dict(zip(dummy_model_out.prior, logpriors_new)))
            if -np.inf in logpriors_new:
                continue
            # Add/remove likelihoods and/or (re-)calculate derived parameters
            loglikes_add, output_derived = model_add._loglikes_input_params(
                all_params, return_output_params=True)
            loglikes_add = dict(zip(chi2_names_add, loglikes_add))
            output_derived = dict(zip(model_add.output_params, output_derived))
            loglikes_new = [loglikes_add.get(name, -0.5 * point.get(name, 0))
                            for name in collection_out.chi2_names]
            if is_debug(log):
                log.debug("New set of likelihoods: %r",
                          dict(zip(dummy_model_out.likelihood, loglikes_new)))
                if output_derived:
                    log.debug("New set of derived parameters: %r", output_derived)
            if -np.inf in loglikes_new:
                continue
            all_params.update(output_derived)
            all_params.update(out_func_parameterization.to_derived(all_params))
            derived = {param: all_params.get(param)
                       for param in dummy_model_out.parameterization.derived_params()}
            # We need to recompute the aggregated chi2's by hand
            for type_, likes in inv_types.items():
                derived[get_chi2_name(type_)] = sum(
                    -2 * lvalue for lname, lvalue
                    in zip(collection_out.chi2_names, loglikes_new)
                    if undo_chi2_name(lname) in likes)
            if is_debug(log):
                log.debug("New derived parameters: %r",
                          {p: derived[p]
                           for p in dummy_model_out.parameterization.derived_params()
                           if p in add["params"]})
            # Save to the collection (keep the old weight for now)
            weight = point.get(OutPar.weight)
            mpi.check_errors()
            if difflogmax is None and i > OutputOptions.reweight_after and \
                    time.time() - last_dump_time > OutputOptions.output_inteveral_s / 2:
                set_difflogmax()
                collection_out.out_update()
            if difflogmax is not None:
                logpost_new = sum(logpriors_new) + sum(loglikes_new)
                importance_weight = np.exp(logpost_new + point.get(OutPar.minuslogpost)
                                           - difflogmax)
                weight = weight * importance_weight
                importance_weights.append(importance_weight)
                if time.time() - last_dump_time > OutputOptions.output_inteveral_s:
                    collection_out.out_update()
                    last_dump_time = time.time()
            if weight > 0:
                collection_out.add(sampled, derived=derived.values(), weight=weight,
                                   logpriors=logpriors_new, loglikes=loglikes_new)
            # Display progress
            percent = int(np.round((i + done) / to_do * 100))
            if percent != last_percent and not percent % 5:
                last_percent = percent
                progress_bar(log, percent, " (%d/%d)" % (i + done, to_do))
        if difflogmax is None:
            set_difflogmax()
        if not collection_out.data.last_valid_index():
            raise LoggedError(
                log, "No elements in the final sample. Possible causes: an added "
                     "prior or likelihood takes value zero over the full sampled "
                     "domain, the computation of the theory failed everywhere, etc.")
        collection_out.out_update()
        weights.append(np.array(importance_weights))
        done += len(collection_in)
    assert difflogmax is not None
    points = 0
    tot_weight = 0
    min_weight = np.inf
    max_weight = -np.inf
    max_output_weight = -np.inf
    sum_w2 = 0
    points_removed = 0
    for collection_in, collection_out, importance_weights in zip(
            in_collections, out_collections, weights):
        output_weights = collection_out[OutPar.weight]
        points += len(collection_out)
        tot_weight += np.sum(output_weights)
        points_removed += len(importance_weights) - len(output_weights)
        min_weight = min(min_weight, np.min(importance_weights))
        max_weight = max(max_weight, np.max(importance_weights))
        max_output_weight = max(max_output_weight, np.max(output_weights))
        sum_w2 += np.dot(output_weights, output_weights)
    (tot_weights, min_weights, max_weights, max_output_weights, sum_w2s, points_s,
     points_removed_s) = mpi.zip_gather(
        [tot_weight, min_weight, max_weight, max_output_weight,
         sum_w2, points, points_removed])
    if mpi.is_main_process():
        output_out.clear_lock()
        log.info("Finished! Final number of distinct sample points: %s", sum(points_s))
        log.info("Importance weight range: %.4g -- %.4g",
                 min(min_weights), max(max_weights))
        if sum(points_removed_s):
            log.info("Points deleted due to zero weight: %s", sum(points_removed_s))
        log.info("Effective number of single samples if independent (sum w)/max(w): %s",
                 int(sum(tot_weights) / max(max_output_weights)))
        log.info("Effective number of weighted samples if independent "
                 "(sum w)^2/sum(w^2): %s",
                 int(sum(tot_weights) ** 2 / sum(sum_w2s)))
    products: PostResultDict = {
        "sample": value_or_list(out_collections),
        "stats": {'min_importance_weight': (min(min_weights) / max(max_weights)),
                  'points_removed': sum(points_removed_s),
                  'tot_weight': sum(tot_weights),
                  'max_weight': max(max_output_weights),
                  'sum_w2': sum(sum_w2s),
                  'points': sum(points_s)},
        "logpost_weight_offset": difflogmax,
        "weights": value_or_list(weights)}
    return PostTuple(info=out_combined, products=products)
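# A minimal usage sketch of ``post`` (illustrative only: the output prefix, suffix
# and likelihood below are hypothetical; a YAML file path works equally as input):
#
#     from cobaya.post import post
#
#     updated_info, products = post({
#         "output": "chains/run1",  # existing sample to load and post-process
#         "post": {
#             "suffix": "importance",
#             "add": {"likelihood": {"my_new_like": "my_module.my_logp"}},
#         },
#     })
#     new_samples = products["sample"]  # SampleCollection, or a list of them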
def initialize(self):
    """Importing CAMB from the correct path, if given."""
    try:
        install_path = \
            self.get_path(self.packages_path) if self.packages_path else None
        min_version = None if self.ignore_obsolete else self._min_camb_version
        self.camb = load_external_module(
            "camb", path=self.path, install_path=install_path,
            min_version=min_version, get_import_path=self.get_import_path,
            logger=self.log, not_installed_level="debug")
    except VersionCheckError as excpt:
        raise VersionCheckError(
            str(excpt) + " If you are using CAMB unmodified, upgrade with "
                         "`cobaya-install camb --upgrade`. If you are using a "
                         "modified CAMB, set the option `ignore_obsolete: True` "
                         "for CAMB.")
    except ComponentNotInstalledError as excpt:
        raise ComponentNotInstalledError(
            self.log, (f"Could not find CAMB: {excpt}. "
                       "To install it, run `cobaya-install camb`."))
    super().initialize()
    self.extra_attrs = {"Want_CMB": False, "Want_cl_2D_array": False,
                        "WantCls": False}
    # Derived parameters that may not have been requested, but will be necessary later
    self.derived_extra = []
    # Some default settings
    self.needs_perts = False
    self.limber = False
    self.non_linear_sources = False
    self.non_linear_pk = False
    self._base_params = None
    self._needs_lensing_cross = False
    self._sigmaR_z_indices = {}
    if self.external_primordial_pk:
        self.extra_args['initial_power_model'] = \
            self.camb.initialpower.SplinedInitialPower
        self.initial_power_args, self.power_params = {}, []
    else:
        power_spectrum = self.camb.CAMBparams.make_class_named(
            self.extra_args.get('initial_power_model',
                                self.camb.initialpower.InitialPowerLaw),
            self.camb.initialpower.InitialPower)
        self.initial_power_args, self.power_params = \
            self._extract_params(power_spectrum.set_params)
    nonlin = self.camb.CAMBparams.make_class_named(
        self.extra_args.get('non_linear_model', self.camb.nonlinear.Halofit),
        self.camb.nonlinear.NonLinearModel)
    self.nonlin_args, self.nonlin_params = self._extract_params(nonlin.set_params)
    self.requires = str_to_list(getattr(self, "requires", []))
    self._transfer_requires = [p for p in self.requires
                               if p not in self.get_can_support_params()]
    self.requires = [p for p in self.requires
                     if p not in self._transfer_requires]
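# For orientation, the options read in ``initialize`` above correspond to an input
# block along these lines (an illustrative sketch: the class names shown are the
# defaults used above, and any compatible InitialPower / NonLinearModel subclass
# may be given instead):
#
#     theory:
#       camb:
#         path: null          # or the path to a local (possibly modified) CAMB
#         ignore_obsolete: False
#         extra_args:
#           initial_power_model: InitialPowerLaw
#           non_linear_model: Halofit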