Example No. 1
 def _load_covmat(self, prefer_load_old, auto_params=None):
     if prefer_load_old and os.path.exists(self.covmat_filename()):
         if is_main_process():
             covmat = np.atleast_2d(np.loadtxt(self.covmat_filename()))
         else:
             covmat = None
         covmat = share_mpi(covmat)
         self.mpi_info("Covariance matrix from previous sample.")
         return covmat, []
     else:
         return share_mpi(
             self.initial_proposal_covmat(
                 auto_params=auto_params) if is_main_process() else None)
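
Note: both variants of this method hinge on Cobaya's `share_mpi` helper, which broadcasts a value computed on the root process to every other MPI process; that is why only the main process reads the covariance matrix from disk. A minimal sketch of such a helper, assuming mpi4py (the real Cobaya implementation may differ in details):

    from mpi4py import MPI

    def share_mpi(data=None, root=0):
        # Broadcast `data` from the root process; non-root callers pass
        # nothing and receive the root's value.
        return MPI.COMM_WORLD.bcast(data, root=root)
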
Example No. 2
 def _load_covmat(self,
                  from_old_chain,
                  default_not_found=None,
                  auto_params=None):
     if from_old_chain and os.path.exists(self.covmat_filename()):
         if is_main_process():
             covmat = np.atleast_2d(np.loadtxt(self.covmat_filename()))
         else:
             covmat = None
         covmat = share_mpi(covmat)
         self.mpi_info("Covariance matrix from checkpoint.")
         return covmat, []
     elif default_not_found is not None:
         return default_not_found, []
     else:
         return share_mpi(
             self.initial_proposal_covmat(
                 auto_params=auto_params) if is_main_process() else None)
Example No. 3
 def __init__(self, *args, **kwargs):
     if is_main_process():
         Output.__init__(self, *args, **kwargs)
     if more_than_one_process():
         to_broadcast = (
             "folder", "prefix", "kind", "ext", "_resuming", "prefix_regexp_str")
         values = share_mpi([getattr(self, var) for var in to_broadcast]
                            if is_main_process() else None)
         for name, var in zip(to_broadcast, values):
             setattr(self, name, var)
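
The root-computes, everyone-receives pattern above generalizes to any set of attributes. A hypothetical standalone illustration (class and attribute names are illustrative, not Cobaya API), assuming mpi4py:

    from mpi4py import MPI

    class SharedConfig:
        def __init__(self, comm=MPI.COMM_WORLD):
            settings = None
            if comm.rank == 0:
                # Only the root does the (possibly I/O-bound) setup.
                settings = {"folder": "chains", "prefix": "run1"}
            # Collective call: every rank receives the root's dictionary.
            for name, value in comm.bcast(settings, root=0).items():
                setattr(self, name, value)
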
Example No. 4
    def __init__(self,
                 info_sampler: SamplerDict,
                 model: Model,
                 output: Optional[Output] = None,
                 packages_path: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Actual initialization of the class. Loads the default and input information and
        calls the custom ``initialize`` method.

        [Do not modify this one.]
        """
        self._model = model
        self._output = output
        self._updated_info = deepcopy_where_possible(info_sampler)
        super().__init__(info_sampler,
                         packages_path=packages_path,
                         name=name,
                         initialize=False,
                         standalone=False)
        if not model.parameterization.sampled_params():
            self.mpi_warning("No sampled parameters requested! "
                             "This will fail for non-mock samplers.")
        # Load checkpoint info, if resuming
        if self.output.is_resuming() and not isinstance(self, Minimizer):
            checkpoint_info = None
            if mpi.is_main_process():
                try:
                    checkpoint_info = yaml_load_file(
                        self.checkpoint_filename())

                    if self.get_name() not in checkpoint_info["sampler"]:
                        raise LoggedError(
                            self.log, "Checkpoint file found at '%s' "
                            "but it corresponds to a different sampler.",
                            self.checkpoint_filename())
                except (IOError, TypeError):
                    pass
            checkpoint_info = mpi.share_mpi(checkpoint_info)
            if checkpoint_info:
                self.set_checkpoint_info(checkpoint_info)
                self.mpi_info("Resuming from previous sample!")
        elif not isinstance(self, Minimizer) and mpi.is_main_process():
            try:
                output.delete_file_or_folder(self.checkpoint_filename())
                output.delete_file_or_folder(self.progress_filename())
            except (OSError, TypeError):
                pass
        self._set_rng()
        self.initialize()
        model.set_cache_size(self._get_requested_cache_size())
        # Add to the updated info some values which are
        # only available after initialization
        self._updated_info["version"] = self.get_version()
Example No. 5
def info_random_gaussian_mixture(ranges, n_modes=1, input_params_prefix="",
                                 output_params_prefix="", O_std_min=1e-2, O_std_max=1,
                                 derived=False, mpi_aware=True,
                                 random_state=None):
    """
    Wrapper around ``random_mean`` and ``random_cov`` to generate the likelihood and
    parameter info for a random Gaussian mixture.

    If ``mpi_aware=True``, it draws the random means and covariances only once, and
    communicates them to the rest of the MPI processes.
    """
    cov: Any
    mean: Any
    if is_main_process() or not mpi_aware:
        cov = random_cov(ranges, n_modes=n_modes, O_std_min=O_std_min,
                         O_std_max=O_std_max, mpi_warn=False, random_state=random_state)
        if n_modes == 1:
            cov = [cov]
        # Make sure it stays away from the edges
        mean = [[]] * n_modes
        for i in range(n_modes):
            std = np.sqrt(cov[i].diagonal())
            factor = 3
            ranges_mean = [[r[0] + factor * s, r[1] - factor * s] for r, s in
                           zip(ranges, std)]
            # If this implies min>max, take the centre
            ranges_mean = [
                (r if r[0] <= r[1] else 2 * [(r[0] + r[1]) / 2]) for r in ranges_mean]
            mean[i] = random_mean(ranges_mean, n_modes=1, mpi_warn=False,
                                  random_state=random_state)
    else:
        mean, cov = None, None
    if mpi_aware:
        mean, cov = share_mpi((mean, cov))
    dimension = len(ranges)
    info: InputDict = {"likelihood": {"gaussian_mixture": {
        "means": mean, "covs": cov, "input_params_prefix": input_params_prefix,
        "output_params_prefix": output_params_prefix, "derived": derived}},
        "params": dict(
            # sampled
            tuple((input_params_prefix + "_%d" % i,
                   {"prior": {"min": ranges[i][0], "max": ranges[i][1]},
                    "latex": r"\alpha_{%i}" % i})
                  for i in range(dimension)) +
            # derived
            (tuple((output_params_prefix + "_%d" % i,
                    {"latex": r"\beta_{%i}" % i})
                   for i in range(dimension * n_modes)) if derived else ()))}
    return info
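
Assuming the function above, a single-process call for a two-dimensional, two-mode problem might look like this (values are illustrative):

    ranges = [[0, 1], [0, 1]]
    info = info_random_gaussian_mixture(ranges, n_modes=2,
                                        input_params_prefix="a",
                                        derived=True, mpi_aware=False)
    # info["params"] defines sampled parameters a_0 and a_1 with uniform
    # priors over the given ranges, plus the requested derived parameters.
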
Example No. 6
def info_random_gaussian_mixture(
        ranges, n_modes=1, input_params_prefix="", output_params_prefix="",
        O_std_min=1e-2, O_std_max=1, derived=False, mpi_aware=True):
    """
    Wrapper around ``random_mean`` and ``random_cov`` to generate the likelihood and
    parameter info for a random Gaussian mixture.

    If ``mpi_aware=True``, it draws the random means and covariances only once, and
    communicates them to the rest of the MPI processes.
    """
    if is_main_process() or not mpi_aware:
        cov = random_cov(ranges, n_modes=n_modes,
                         O_std_min=O_std_min, O_std_max=O_std_max, mpi_warn=False)
        if n_modes == 1:
            cov = [cov]
        # Make sure it stays away from the edges
        mean = [[]] * n_modes
        for i in range(n_modes):
            std = np.sqrt(cov[i].diagonal())
            factor = 3
            ranges_mean = [[l[0] + factor * s, l[1] - factor * s] for l, s in
                           zip(ranges, std)]
            # If this implies min>max, take the centre
            ranges_mean = [
                (l if l[0] <= l[1] else 2 * [(l[0] + l[1]) / 2]) for l in ranges_mean]
            mean[i] = random_mean(ranges_mean, n_modes=1, mpi_warn=False)
    if mpi_aware:
        mean, cov = share_mpi((mean, cov) if is_main_process() else None)
    dimension = len(ranges)
    info = {kinds.likelihood: {"gaussian_mixture": {
        "means": mean, "covs": cov, _input_params_prefix: input_params_prefix,
        _output_params_prefix: output_params_prefix, "derived": derived}}}
    info[_params] = dict(
        # sampled
        [(input_params_prefix + "_%d" % i,
          {"prior": {"min": ranges[i][0], "max": ranges[i][1]},
           "latex": r"\alpha_{%i}" % i})
         for i in range(dimension)] +
        # derived
        ([[output_params_prefix + "_%d" % i,
           {"min": -3, "max": 3, "latex": r"\beta_{%i}" % i}]
          for i in range(dimension * n_modes)] if derived else []))
    return info
Example No. 7
 def initialize(self):
     """Imports the PolyChord sampler and prepares its arguments."""
     # Allow global import if no direct path specification
     allow_global = not self.path
     if not self.path and self.packages_path:
         self.path = self.get_path(self.packages_path)
     self.pc = self.is_installed(path=self.path, allow_global=allow_global)
     if not self.pc:
         raise NotInstalledError(
             self.log,
             "Could not find PolyChord. Check error message above. "
             "To install it, run 'cobaya-install polychord --%s "
             "[packages_path]'", _packages_path_arg)
     # Prepare arguments and settings
     from pypolychord.settings import PolyChordSettings
     self.n_sampled = len(self.model.parameterization.sampled_params())
     self.n_derived = len(self.model.parameterization.derived_params())
     self.n_priors = len(self.model.prior)
     self.n_likes = len(self.model.likelihood)
     self.nDims = self.model.prior.d()
     self.nDerived = (self.n_derived + self.n_priors + self.n_likes)
     if self.logzero is None:
         self.logzero = np.nan_to_num(-np.inf)
     if self.max_ndead == np.inf:
         self.max_ndead = -1
     self._quants_d_units = ["nlive", "max_ndead"]
     for p in self._quants_d_units:
         if getattr(self, p) is not None:
             setattr(
                 self, p,
                 NumberWithUnits(getattr(self, p),
                                 "d",
                                 scale=self.nDims,
                                 dtype=int).value)
     self._quants_nlive_units = ["nprior"]
     for p in self._quants_nlive_units:
         if getattr(self, p) is not None:
             setattr(
                 self, p,
                 NumberWithUnits(getattr(self, p),
                                 "nlive",
                                 scale=self.nlive,
                                 dtype=int).value)
     # Fill the automatic ones
     if getattr(self, "feedback", None) is None:
         values = {
             logging.CRITICAL: 0,
             logging.ERROR: 0,
             logging.WARNING: 0,
             logging.INFO: 1,
             logging.DEBUG: 2
         }
         self.feedback = values[self.log.getEffectiveLevel()]
     # Prepare output folders and prefixes
     if self.output:
         self.file_root = self.output.prefix
         self.read_resume = self.output.is_resuming()
     else:
         output_prefix = share_mpi(
             hex(int(random() * 16**6))[2:] if is_main_process() else None)
         self.file_root = output_prefix
         # dummy output -- no resume!
         self.read_resume = False
     self.base_dir = self.get_base_dir(self.output)
     self.raw_clusters_dir = os.path.join(self.base_dir, self._clusters_dir)
     self.output.create_folder(self.base_dir)
     if self.do_clustering:
         self.clusters_folder = self.get_clusters_dir(self.output)
         self.output.create_folder(self.clusters_folder)
     self.mpi_info("Storing raw PolyChord output in '%s'.", self.base_dir)
     # Exploiting the speed hierarchy
     if self.blocking:
         blocks, oversampling_factors = self.model.check_blocking(
             self.blocking)
     else:
         if self.measure_speeds:
             self.model.measure_and_set_speeds(n=self.measure_speeds)
         blocks, oversampling_factors = self.model.get_param_blocking_for_sampler(
             oversample_power=self.oversample_power)
     self.mpi_info("Parameter blocks and their oversampling factors:")
     max_width = len(str(max(oversampling_factors)))
     for f, b in zip(oversampling_factors, blocks):
         self.mpi_info("* %" + "%d" % max_width + "d : %r", f, b)
     # Save blocking in updated info, in case we want to resume
     self._updated_info["blocking"] = list(zip(oversampling_factors,
                                               blocks))
     blocks_flat = list(chain(*blocks))
     self.ordering = [
         blocks_flat.index(p)
         for p in self.model.parameterization.sampled_params()
     ]
     self.grade_dims = [len(block) for block in blocks]
     # Steps per block
     # NB: num_repeats is ignored by PolyChord when int "grade_frac" given,
     # so needs to be applied by hand.
     # In num_repeats, `d` is interpreted as dimension of each block
     self.grade_frac = [
         int(o * read_dnumber(self.num_repeats, dim_block))
         for o, dim_block in zip(oversampling_factors, self.grade_dims)
     ]
     # Assign settings
     pc_args = [
         "nlive", "num_repeats", "nprior", "do_clustering",
         "precision_criterion", "max_ndead", "boost_posterior", "feedback",
         "logzero", "posteriors", "equals", "compression_factor",
         "cluster_posteriors", "write_resume", "read_resume", "write_stats",
         "write_live", "write_dead", "base_dir", "file_root", "grade_frac",
         "grade_dims"
     ]
     # As stated above, num_repeats is ignored, so let's not pass it
     pc_args.pop(pc_args.index("num_repeats"))
     self.pc_settings = PolyChordSettings(
         self.nDims,
         self.nDerived,
         seed=(self.seed if self.seed is not None else -1),
         **{
             p: getattr(self, p)
             for p in pc_args if getattr(self, p) is not None
         })
     # prior conversion from the hypercube
     bounds = self.model.prior.bounds(
         confidence_for_unbounded=self.confidence_for_unbounded)
     # Check if priors are bounded (nan's to inf)
     inf = np.where(np.isinf(bounds))
     if len(inf[0]):
         params_names = self.model.parameterization.sampled_params()
         params = [params_names[i] for i in sorted(list(set(inf[0])))]
          raise LoggedError(
              self.log,
              "PolyChord needs bounded priors, but the parameter(s) '" +
              "', '".join(params) + "' is(are) unbounded.")
     locs = bounds[:, 0]
     scales = bounds[:, 1] - bounds[:, 0]
     # This function re-scales the parameters AND puts them in the right order
     self.pc_prior = lambda x: (locs + np.array(x)[self.ordering] * scales
                                ).tolist()
     # We will need the volume of the prior domain, since PolyChord divides by it
     self.logvolume = np.log(np.prod(scales))
     # Prepare callback function
     if self.callback_function is not None:
         self.callback_function_callable = (get_external_function(
             self.callback_function))
     self.last_point_callback = 0
     # Prepare runtime live and dead points collections
     self.live = Collection(self.model,
                            None,
                            name="live",
                            initial_size=self.pc_settings.nlive)
     self.dead = Collection(self.model, self.output, name="dead")
     # Done!
     if is_main_process():
         self.log.debug("Calling PolyChord with arguments:")
         for p, v in inspect.getmembers(self.pc_settings,
                                        lambda a: not (callable(a))):
             if not p.startswith("_"):
                 self.log.debug("  %s: %s", p, v)
     self.mpi_info("Initialized!")
Example No. 8
    def process_results(self):
        """
        Determines success (or not), chooses best (if MPI)
        and produces output (if requested).
        """
        evals_attr_ = evals_attr[self.method.lower()]
        # If something failed
        if not hasattr(self, "result"):
            return
        if more_than_one_process():
            results = get_mpi_comm().gather(self.result, root=0)
            successes = get_mpi_comm().gather(self.success, root=0)
            _affine_transform_baselines = get_mpi_comm().gather(
                self._affine_transform_baseline, root=0)
            if is_main_process():
                mins = [(getattr(r, evals_attr_) if s else np.inf)
                        for r, s in zip(results, successes)]
                i_min = np.argmin(mins)
                self.result = results[i_min]
                self._affine_transform_baseline = _affine_transform_baselines[
                    i_min]
        else:
            successes = [self.success]
        if is_main_process():
            if not any(successes):
                raise LoggedError(
                    self.log,
                    "Minimization failed! Here is the raw result object:\n%s",
                    str(self.result))
            elif not all(successes):
                self.log.warning('Some minimizations failed!')
            elif more_than_one_process():
                if max(mins) - min(mins) > 1:
                    self.log.warning('Big spread in minima: %r', mins)
                elif max(mins) - min(mins) > 0.2:
                    self.log.warning('Modest spread in minima: %r', mins)

            logp_min = -np.array(getattr(self.result, evals_attr_))
            x_min = self.inv_affine_transform(self.result.x)
            self.log.info("-log(%s) minimized to %g",
                          "likelihood" if self.ignore_prior else "posterior",
                          -logp_min)
            recomputed_post_min = self.model.logposterior(x_min, cached=False)
            recomputed_logp_min = (sum(recomputed_post_min.loglikes)
                                   if self.ignore_prior else
                                   recomputed_post_min.logpost)
            if not np.allclose(logp_min, recomputed_logp_min, atol=1e-2):
                raise LoggedError(
                    self.log,
                    "Cannot reproduce log minimum to within 0.01. Maybe your "
                    "likelihood is stochastic or large numerical error? "
                    "Recomputed min: %g (was %g) at %r", recomputed_logp_min,
                    logp_min, x_min)
            self.minimum = OnePoint(self.model,
                                    self.output,
                                    name="",
                                    extension=get_collection_extension(
                                        self.ignore_prior))
            self.minimum.add(x_min,
                             derived=recomputed_post_min.derived,
                             logpost=recomputed_post_min.logpost,
                             logpriors=recomputed_post_min.logpriors,
                             loglikes=recomputed_post_min.loglikes)
            self.log.info("Parameter values at minimum:\n%s",
                          self.minimum.data.to_string())
            self.minimum.out_update()
            self.dump_getdist()
        # Share results ('result' object may not be picklable)
        self.minimum = share_mpi(getattr(self, "minimum", None))
        self._inv_affine_transform_matrix = share_mpi(
            getattr(self, "_inv_affine_transform_matrix"))
        self._affine_transform_baseline = share_mpi(
            getattr(self, "_affine_transform_baseline"))
        try:
            self.result = share_mpi(getattr(self, "result"))
        except Exception:
            self.result = None
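
The MPI branch above gathers one candidate minimum per process and keeps the best one. The core of that pattern, stripped of the bookkeeping (the function name is hypothetical; assumes mpi4py):

    from mpi4py import MPI
    import numpy as np

    def best_of_all(local_value, comm=MPI.COMM_WORLD):
        values = comm.gather(local_value, root=0)
        if comm.rank == 0:
            return values[int(np.argmin(values))]
        return None  # Broadcast afterwards if every rank needs the winner.
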
Example No. 9
 def tmpdir():
     # With no data argument, share_mpi() just receives the root's broadcast
     # value (None unless the root shared something; compare Example No. 10).
     return mpi.share_mpi()
Example No. 10
 def tmpdir(tmpdir):
     return mpi.share_mpi(str(tmpdir))
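
In context these are presumably pytest fixtures: each MPI rank gets its own built-in `tmpdir`, so the root's path must be shared for all ranks to write to the same place. A hedged reconstruction with the decorator restored (assuming pytest and Cobaya's `mpi` module):

    import pytest
    from cobaya import mpi

    @pytest.fixture
    def tmpdir(tmpdir):
        # Rank 0's tmpdir wins; every rank receives the same path string.
        return mpi.share_mpi(str(tmpdir))
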
Example No. 11
 def check_convergence_and_learn_proposal(self):
     """
     Checks the convergence of the sampling process, and, if requested,
     learns a new covariance matrix for the proposal distribution from the covariance
     of the last samples.
     """
     if more_than_one_process():
         # Compute and gather means, covs and CL intervals of last half of chains
         use_first = int(self.n() / 2)
         mean = self.collection.mean(first=use_first)
         cov = self.collection.cov(first=use_first)
         mcsamples = self.collection._sampled_to_getdist_mcsamples(
             first=use_first)
         try:
             bound = np.array([[
                 mcsamples.confidence(i,
                                      limfrac=self.Rminus1_cl_level / 2.,
                                      upper=which)
                 for i in range(self.model.prior.d())
             ] for which in [False, True]]).T
             success_bounds = True
         except Exception:
             bound = None
             success_bounds = False
         Ns, means, covs, bounds, acceptance_rates = map(
             lambda x: np.array(get_mpi_comm().gather(x)),
             [self.n(), mean, cov, bound, self.acceptance_rate])
     else:
         # Compute and gather means, covs and CL intervals of last m-1 chain fractions
         m = 1 + self.Rminus1_single_split
         cut = int(len(self.collection) / m)
         try:
             Ns = (m - 1) * [cut]
             means = np.array([
                 self.collection.mean(first=i * cut, last=(i + 1) * cut - 1)
                 for i in range(1, m)
             ])
             covs = np.array([
                 self.collection.cov(first=i * cut, last=(i + 1) * cut - 1)
                 for i in range(1, m)
             ])
             mcsamples_list = [
                 self.collection._sampled_to_getdist_mcsamples(
                     first=i * cut, last=(i + 1) * cut - 1)
                 for i in range(1, m)
             ]
         except Exception:
             self.log.info(
                 "Not enough points in chain to check convergence. "
                 "Waiting for next checkpoint.")
             return
         acceptance_rates = self.acceptance_rate
         try:
             bounds = [
                 np.array([[
                     mcs.confidence(i,
                                    limfrac=self.Rminus1_cl_level / 2.,
                                    upper=which)
                     for i in range(self.model.prior.d())
                 ] for which in [False, True]]).T for mcs in mcsamples_list
             ]
             success_bounds = True
         except Exception:
             bounds = None
             success_bounds = False
     # Compute convergence diagnostics
     if is_main_process():
         self.progress.at[self.i_learn,
                          "N"] = (sum(Ns)
                                  if more_than_one_process() else self.n())
         self.progress.at[self.i_learn, "timestamp"] = \
             datetime.datetime.now().isoformat()
         acceptance_rate = (np.average(acceptance_rates, weights=Ns) if
                            more_than_one_process() else acceptance_rates)
         self.log.info(
             " - Acceptance rate: %.3f" +
             (" = avg(%r)" %
              list(acceptance_rates) if more_than_one_process() else ""),
             acceptance_rate)
         self.progress.at[self.i_learn, "acceptance_rate"] = acceptance_rate
         # "Within" or "W" term -- our "units" for assessing convergence
         # and our prospective new covariance matrix
         mean_of_covs = np.average(covs, weights=Ns, axis=0)
         # "Between" or "B" term
         # We don't weight with the number of samples in the chains here:
         # shorter chains will likely be outliers, and we want to notice them
         cov_of_means = np.atleast_2d(np.cov(means.T))  # , fweights=Ns)
         # For numerical stability, we turn mean_of_covs into correlation matrix:
         #   rho = (diag(Sigma))^(-1/2) * Sigma * (diag(Sigma))^(-1/2)
         # and apply the same transformation to the mean of covs (same eigenvals!)
         diagSinvsqrt = np.diag(np.power(np.diag(cov_of_means), -0.5))
         corr_of_means = diagSinvsqrt.dot(cov_of_means).dot(diagSinvsqrt)
         norm_mean_of_covs = diagSinvsqrt.dot(mean_of_covs).dot(
             diagSinvsqrt)
         success = False
         # Cholesky of (normalized) mean of covs and eigvals of Linv*cov_of_means*L
         try:
             L = np.linalg.cholesky(norm_mean_of_covs)
         except np.linalg.LinAlgError:
             self.log.warning(
                 "Negative covariance eigenvectors. "
                 "This may mean that the covariance of the samples does not "
                 "contain enough information at this point. "
                 "Skipping learning a new covmat for now.")
         else:
             Linv = np.linalg.inv(L)
             # Suppress numpy warnings (restored later in this function)
             error_handling = deepcopy(np.geterr())
             np.seterr(all="ignore")
             try:
                 eigvals = np.linalg.eigvalsh(
                     Linv.dot(corr_of_means).dot(Linv.T))
                 success = True
             except np.linalg.LinAlgError:
                 self.log.warning("Could not compute eigenvalues. "
                                  "Skipping learning a new covmat for now.")
             else:
                 Rminus1 = max(np.abs(eigvals))
                 self.progress.at[self.i_learn, "Rminus1"] = Rminus1
                 # For real square matrices, a possible def of the cond number is:
                 condition_number = Rminus1 / min(np.abs(eigvals))
                 self.log.debug(" - Condition number = %g",
                                condition_number)
                 self.log.debug(" - Eigenvalues = %r", eigvals)
                 self.log.info(
                     " - Convergence of means: R-1 = %f after %d accepted steps"
                     %
                     (Rminus1,
                      (sum(Ns) if more_than_one_process() else self.n())) +
                     (" = sum(%r)" %
                      list(Ns) if more_than_one_process() else ""))
                 # Have we converged in means?
                 # (criterion must be fulfilled twice in a row)
                 if max(Rminus1, self.Rminus1_last) < self.Rminus1_stop:
                     # Check the convergence of the bounds of the confidence intervals
                     # Same as R-1, but with the rms deviation from the mean bound
                     # in units of the mean standard deviation of the chains
                     if success_bounds:
                         Rminus1_cl = (np.std(bounds, axis=0).T /
                                       np.sqrt(np.diag(mean_of_covs)))
                         self.log.debug(
                             " - normalized std's of bounds = %r",
                             Rminus1_cl)
                         Rminus1_cl = np.max(Rminus1_cl)
                         self.progress.at[self.i_learn,
                                          "Rminus1_cl"] = Rminus1_cl
                         self.log.info(
                             " - Convergence of bounds: R-1 = %f after %d "
                             % (Rminus1_cl,
                                (sum(Ns) if more_than_one_process(
                                ) else self.n())) + "accepted steps" +
                             (" = sum(%r)" %
                              list(Ns) if more_than_one_process() else ""))
                         if Rminus1_cl < self.Rminus1_cl_stop:
                             self.converged = True
                             self.log.info("The run has converged!")
                         self._Ns = Ns
                     else:
                         self.log.info(
                             "Computation of the bounds was not possible. "
                             "Waiting until the next converge check.")
             np.seterr(**error_handling)
     else:
         mean_of_covs = np.empty(
             (self.model.prior.d(), self.model.prior.d()))
         success = None
         Rminus1 = None
     # Broadcast and save the convergence status and the last R-1 of means
     success = share_mpi(success)
     if success:
         self.Rminus1_last, self.converged = share_mpi((
             Rminus1, self.converged) if is_main_process() else None)
         # Do we want to learn a better proposal pdf?
         if self.learn_proposal and not self.converged:
             good_Rminus1 = (self.learn_proposal_Rminus1_max >
                             self.Rminus1_last >
                             self.learn_proposal_Rminus1_min)
             if not good_Rminus1:
                 self.mpi_info(
                     "Convergence less than requested for updates: "
                     "waiting until the next convergence check.")
                 return
             if more_than_one_process():
                 get_mpi_comm().Bcast(mean_of_covs, root=0)
             else:
                 mean_of_covs = covs[0]
             try:
                 self.proposer.set_covariance(mean_of_covs)
                 if is_main_process():
                     self.log.info(
                         " - Updated covariance matrix of proposal pdf.")
                     self.log.debug("%r", mean_of_covs)
             except Exception:
                 if is_main_process():
                     self.log.debug(
                         "Updating covariance matrix failed unexpectedly. "
                         "waiting until next covmat learning attempt.")
     # Save checkpoint info
     self.write_checkpoint()
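
The diagnostic computed above is a multivariate Gelman-Rubin style statistic: the largest eigenvalue of the between-chain covariance of the means, measured in units of the mean within-chain covariance. A minimal standalone version (simplified: no MPI, no correlation-matrix rescaling, no error handling):

    import numpy as np

    def r_minus_one(means, covs, weights=None):
        # means: (m, d) array of chain means; covs: (m, d, d) covariances.
        mean_of_covs = np.average(covs, weights=weights, axis=0)
        cov_of_means = np.atleast_2d(np.cov(np.asarray(means).T))
        L_inv = np.linalg.inv(np.linalg.cholesky(mean_of_covs))
        eigvals = np.linalg.eigvalsh(L_inv @ cov_of_means @ L_inv.T)
        return float(np.max(np.abs(eigvals)))
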
Example No. 12
def load_input_MPI(input_file):
    return share_mpi(load_input(input_file) if is_main_process() else None)
Example No. 13
 def set_resuming(self, *args, **kwargs):
     if is_main_process():
         Output.set_resuming(self, *args, **kwargs)
     if more_than_one_process():
         self._resuming = share_mpi(
             self._resuming if is_main_process() else None)
Example No. 14
 def check_and_dump_info(self, *args, **kwargs):
     if is_main_process():
         Output.check_and_dump_info(self, *args, **kwargs)
     # Share cached loaded info
     self._old_updated_info = share_mpi(
         getattr(self, "_old_updated_info", None))