def initialize(self):
    """Set up a single-point sample collection for posterior evaluation."""
    # Coerce the requested number of samples to an integer up front, so a
    # malformed input fails at initialization rather than mid-run.
    try:
        n_samples = int(self.N)
    except ValueError:
        raise LoggedError(
            self.log,
            "Could not convert the number of samples to an integer: %r",
            self.N)
    self.N = n_samples
    # Collection that will hold the evaluated point(s).
    self.one_point = SampleCollection(self.model, self.output, name="1")
    self.log.info("Initialized!")
def initialize(self):
    """Imports the PolyChord sampler and prepares its arguments."""
    # Allow global import if no direct path specification
    allow_global = not self.path
    if not self.path and self.packages_path:
        self.path = self.get_path(self.packages_path)
    self.pc: Any = self.is_installed(path=self.path, allow_global=allow_global,
                                     check=False)
    if not self.pc:
        raise NotInstalledError(
            self.log, "Could not find PolyChord. Check error message above. "
                      "To install it, run 'cobaya-install polychord --%s "
                      "[packages_path]'", packages_path_arg)
    # Prepare arguments and settings
    # Dimensions: sampled params define nDims; derived params plus one slot per
    # prior and per likelihood are passed to PolyChord as "derived" outputs.
    self.n_sampled = len(self.model.parameterization.sampled_params())
    self.n_derived = len(self.model.parameterization.derived_params())
    self.n_priors = len(self.model.prior)
    self.n_likes = len(self.model.likelihood)
    self.nDims = self.model.prior.d()
    self.nDerived = (self.n_derived + self.n_priors + self.n_likes)
    if self.logzero is None:
        # Large negative finite stand-in for -inf, as produced by nan_to_num.
        self.logzero = np.nan_to_num(-np.inf)
    if self.max_ndead == np.inf:
        # PolyChord convention: -1 means "no limit on dead points".
        self.max_ndead = -1
    # Quantities given "in units of the dimension": resolve to plain ints.
    self._quants_d_units = ["nlive", "max_ndead"]
    for p in self._quants_d_units:
        if getattr(self, p) is not None:
            setattr(self, p,
                    NumberWithUnits(getattr(self, p), "d", scale=self.nDims,
                                    dtype=int).value)
    # Quantities given "in units of nlive": resolve after nlive is fixed above.
    self._quants_nlive_units = ["nprior", "nfail"]
    for p in self._quants_nlive_units:
        if getattr(self, p) is not None:
            setattr(self, p,
                    NumberWithUnits(getattr(self, p), "nlive", scale=self.nlive,
                                    dtype=int).value)
    # Fill the automatic ones
    if getattr(self, "feedback", None) is None:
        # Map cobaya's logging verbosity onto PolyChord's feedback levels.
        values = {logging.CRITICAL: 0, logging.ERROR: 0, logging.WARNING: 0,
                  logging.INFO: 1, logging.DEBUG: 2}
        self.feedback = values[self.log.getEffectiveLevel()]
    # Prepare output folders and prefixes
    if self.output:
        self.file_root = self.output.prefix
        self.read_resume = self.output.is_resuming()
    else:
        # No output driver: make a random hex prefix on the main process and
        # broadcast it so all MPI processes agree on the file root.
        output_prefix = share_mpi(
            hex(int(self._rng.random() * 16 ** 6))[2:]
            if is_main_process() else None)
        self.file_root = output_prefix
        # dummy output -- no resume!
        self.read_resume = False
    self.base_dir = self.get_base_dir(self.output)
    self.raw_clusters_dir = os.path.join(self.base_dir, self._clusters_dir)
    self.output.create_folder(self.base_dir)
    if self.do_clustering:
        self.clusters_folder = self.get_clusters_dir(self.output)
        self.output.create_folder(self.clusters_folder)
    self.mpi_info("Storing raw PolyChord output in '%s'.", self.base_dir)
    # Exploiting the speed hierarchy
    if self.blocking:
        # User-specified blocking: validate it against the model.
        blocks, oversampling_factors = self.model.check_blocking(self.blocking)
    else:
        if self.measure_speeds:
            self.model.measure_and_set_speeds(n=self.measure_speeds,
                                              random_state=self._rng)
        blocks, oversampling_factors = \
            self.model.get_param_blocking_for_sampler(
                oversample_power=self.oversample_power)
    self.mpi_info("Parameter blocks and their oversampling factors:")
    # Right-align the factors to the width of the largest one.
    max_width = len(str(max(oversampling_factors)))
    for f, b in zip(oversampling_factors, blocks):
        self.mpi_info("* %" + "%d" % max_width + "d : %r", f, b)
    # Save blocking in updated info, in case we want to resume
    self._updated_info["blocking"] = list(zip(oversampling_factors, blocks))
    # Map each sampled param to its position in the flattened block ordering.
    blocks_flat = list(chain(*blocks))
    self.ordering = [
        blocks_flat.index(p)
        for p in self.model.parameterization.sampled_params()]
    self.grade_dims = [len(block) for block in blocks]
    # Steps per block
    # NB: num_repeats is ignored by PolyChord when int "grade_frac" given,
    # so needs to be applied by hand.
    # In num_repeats, `d` is interpreted as dimension of each block
    self.grade_frac = [
        int(o * read_dnumber(self.num_repeats, dim_block))
        for o, dim_block in zip(oversampling_factors, self.grade_dims)]
    # Assign settings
    pc_args = ["nlive", "num_repeats", "nprior", "nfail", "do_clustering",
               "feedback", "precision_criterion", "logzero", "max_ndead",
               "boost_posterior", "posteriors", "equals", "cluster_posteriors",
               "write_resume", "read_resume", "write_stats", "write_live",
               "write_dead", "write_prior", "maximise", "compression_factor",
               "synchronous", "base_dir", "file_root", "seed", "grade_dims",
               "grade_frac", "nlives"]
    # As stated above, num_repeats is ignored, so let's not pass it
    pc_args.pop(pc_args.index("num_repeats"))
    settings: Any = load_module('pypolychord.settings',
                                path=self._poly_build_path, min_version=None)
    # PolyChord uses seed=-1 to mean "unseeded".
    self.pc_settings = settings.PolyChordSettings(
        self.nDims, self.nDerived,
        seed=(self.seed if self.seed is not None else -1),
        **{p: getattr(self, p)
           for p in pc_args if getattr(self, p) is not None})
    # Prepare callback function
    if self.callback_function is not None:
        self.callback_function_callable = (
            get_external_function(self.callback_function))
        # Index of the last point already passed to the callback.
        self.last_point_callback = 0
    # Prepare runtime live and dead points collections
    self.live = SampleCollection(self.model, None, name="live")
    self.dead = SampleCollection(self.model, self.output, name="dead")
    # Done!
    if is_main_process():
        self.log.debug("Calling PolyChord with arguments:")
        # Dump all non-callable, public settings attributes for debugging.
        for p, v in inspect.getmembers(self.pc_settings,
                                       lambda a: not (callable(a))):
            if not p.startswith("_"):
                self.log.debug(" %s: %s", p, v)
    self.mpi_info("Initialized!")
class Evaluate(Sampler):
    """Dummy sampler: evaluates the posterior at one (or a few) points."""

    file_base_name = 'evaluate'

    # Fixed values that replace the sampled/reference values for the given
    # parameters (keys must be sampled-parameter names).
    override: Mapping[str, float]
    # Number of points at which to evaluate the posterior.
    N: int

    def initialize(self):
        """
        Creates a 1-point collection to store the point at which the posterior
        is evaluated.

        Raises :class:`LoggedError` if ``N`` cannot be cast to an integer.
        """
        try:
            self.N = int(self.N)
        except ValueError:
            raise LoggedError(
                self.log,
                "Could not convert the number of samples to an integer: %r",
                self.N)
        self.one_point = SampleCollection(self.model, self.output, name="1")
        self.log.info("Initialized!")

    def run(self):
        """
        First gets a reference point. If a single reference point is not
        given, the point is sampled from the reference pdf. If that one is
        not defined either, the point is sampled from the prior.

        Then it evaluates the prior and likelihood(s) and stores them in the
        one-member sample collection.
        """
        for i in range(self.N):
            if self.N > 1:
                self.log.info(
                    "Evaluating sample #%d ------------------------------",
                    i + 1)
            self.log.info("Looking for a reference point with non-zero prior.")
            # Draw from the reference pdf (falls back to the prior).
            reference_values = self.model.prior.reference(
                random_state=self._rng)
            reference_point = dict(
                zip(self.model.parameterization.sampled_params(),
                    reference_values))
            # Apply user-given fixed values on top of the drawn point.
            for p, v in (self.override or {}).items():
                if p not in reference_point:
                    raise LoggedError(
                        self.log,
                        "Parameter '%s' used in override not known. "
                        "Known parameters names are %r.",
                        p, self.model.parameterization.sampled_params())
                reference_point[p] = v
            self.log.info("Reference point:\n " + "\n ".join(
                ["%s = %g" % pv for pv in reference_point.items()]))
            self.log.info("Evaluating prior and likelihoods...")
            self.logposterior = self.model.logposterior(reference_point)
            self.one_point.add(
                list(reference_point.values()),
                derived=self.logposterior.derived,
                logpost=self.logposterior.logpost,
                logpriors=self.logposterior.logpriors,
                loglikes=self.logposterior.loglikes)
            self.log.info("log-posterior = %g", self.logposterior.logpost)
            self.log.info("log-prior = %g", self.logposterior.logprior)
            for j, name in enumerate(self.model.prior):
                self.log.info(
                    " logprior_" + name + " = %g",
                    self.logposterior.logpriors[j])
            # Likelihoods/derived are only computed when the prior is non-null.
            if self.logposterior.logprior > -np.inf:
                self.log.info("log-likelihood = %g", self.logposterior.loglike)
                for j, name in enumerate(self.model.likelihood):
                    self.log.info(
                        " chi2_" + name + " = %g",
                        (-2 * self.logposterior.loglikes[j]))
                self.log.info("Derived params:")
                for name, value in zip(
                        self.model.parameterization.derived_params(),
                        self.logposterior.derived):
                    self.log.info(" " + name + " = %g", value)
            else:
                self.log.info(
                    "Likelihoods and derived parameters not computed, "
                    "since the prior is null.")
        # Write the output: the point and its prior, posterior and likelihood.
        self.one_point.out_update()

    def products(self):
        """
        Auxiliary function to define what should be returned in a scripted
        call.

        Returns:
           The ``SampleCollection`` containing the evaluated point(s), under
           key ``"sample"``.
        """
        return {"sample": self.one_point}