Example #1
def test_std(logp=logp, seed=42):
    np.random.seed(seed)
    ndim = np.random.randint(2, 5)
    nwalkers = 2 * ndim
    nsteps = np.random.randint(3000, 5000)
    sampler = zeus.sampler(logp, nwalkers, ndim, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run(start, nsteps)
    assert np.all(np.abs(np.std(sampler.flatten(), axis=0) - 1.0) < 0.1)
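Examples 1 and 2 assume a module-level logp defined elsewhere in the test file; judging from the assertions (sample mean and standard deviation both within 0.1 of 1.0), a minimal sketch consistent with them is an isotropic Gaussian with mean 1 and unit variance. Note also that the two snippets pass the positional arguments to zeus.sampler in different orders; both orders appear across zeus versions.

import numpy as np

def logp(x):
    # Isotropic Gaussian with mean 1 and unit variance, up to an additive constant.
    return -0.5 * np.sum((x - 1.0) ** 2)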
Example #2
def test_mean(logp=logp, seed=42):
    np.random.seed(seed)
    ndim = np.random.randint(2, 5)
    nwalkers = 2 * ndim
    nsteps = np.random.randint(3000, 5000)
    sampler = zeus.sampler(nwalkers, ndim, logp, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run(start, nsteps)
    assert np.all(np.abs(np.mean(sampler.flatten(), axis=0) - 1.0) < 0.1)
    assert np.all(np.isfinite(sampler.get_log_prob(flat=True)))
    assert np.all(np.isfinite(sampler.get_log_prob()))
Example #3
def test_ncall(seed=42):
    np.random.seed(seed)

    def loglike(theta):
        assert len(theta) == 5
        a = theta[:-1]
        b = theta[1:]
        loglike.ncalls += 1
        return -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum()

    loglike.ncalls = 0

    ndim = 5
    nsteps = 100
    nwalkers = 2 * ndim
    sampler = zeus.sampler(nwalkers, ndim, loglike, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run(start, nsteps)

    assert loglike.ncalls == sampler.ncall + nwalkers
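The final assertion documents zeus's call accounting: the sampler evaluates the log-likelihood once per walker to initialise the ensemble, and sampler.ncall counts only the calls made during sampling, so the total number of calls equals sampler.ncall + nwalkers.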
Example #4
    def run_slice_ensemble(self):
        self.sampler = zeus.sampler(self.nwalker, self.ndim, self.model.lnprob)
        self.sampler.run_mcmc(self.p0, self.nstep)
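Example 4 is a method excerpt; the attributes it reads (nwalker, ndim, model, p0, nstep) must be set on the instance beforehand. A minimal sketch of a hypothetical enclosing class, with illustrative names not taken from the source:

import numpy as np
import zeus

class EnsembleRunner:
    # Hypothetical wrapper class, for illustration only.
    def __init__(self, model, nwalker, ndim, nstep):
        self.model = model      # must expose an lnprob(theta) method
        self.nwalker = nwalker
        self.ndim = ndim
        self.nstep = nstep
        self.p0 = np.random.rand(nwalker, ndim)  # starting positions

    def run_slice_ensemble(self):
        self.sampler = zeus.sampler(self.nwalker, self.ndim, self.model.lnprob)
        self.sampler.run_mcmc(self.p0, self.nstep)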
Example #5
def initialise_sampler():
    """Initialise the likelihood sampler.

    Returns
    -------
    likelihood : :class:`lumfunc_likelihood.LumFuncLikelihood`
        Logarithmic likelihood.
    prior_ranges : :class:`numpy.ndarray`
        Parameter-space boundaries.
    mcmc_sampler : :class:`emcee.EnsembleSampler` or :class:`zeus.sampler`
        Markov chain Monte Carlo sampler.
    initial_state : :class:`numpy.ndarray`
        Initial parameter-space state.
    dimension : int
        Dimension of the parameter space.

    """
    # Set up likelihood and prior.
    lumfunc_model = getattr(modeller, prog_params.model_name + '_lumfunc')

    measurement_file, uncertainty_file = prog_params.data_files

    fixed_file = PATHIN/prog_params.fixed_file \
        if prog_params.fixed_file else None

    model_constraint = getattr(modeller, prog_params.model_name +
                               '_constraint',
                               None) if prog_params.use_constraint else None

    likelihood = LumFuncLikelihood(lumfunc_model,
                                   PATHEXT / measurement_file,
                                   PATHIN / prog_params.prior_file,
                                   uncertainty_file=PATHEXT / uncertainty_file,
                                   fixed_file=fixed_file,
                                   prescription=prog_params.prescription,
                                   model_constraint=model_constraint)

    logger.info("\n---Prior parameters---\n%s\n",
                pformat(dict(likelihood.prior.items())))
    if likelihood.fixed:
        logger.info("\n---Fixed parameters---\n%s\n",
                    pformat(dict(likelihood.fixed.items())))

    dimension = len(likelihood.prior)
    _prior_ranges = np.array(list(likelihood.prior.values()))

    if prog_params.task == 'retrieve':
        return likelihood, _prior_ranges, dimension

    # Set up the sampler.
    if prog_params.sampler == 'emcee':
        output_file = (PATHOUT / prog_params.chain_file).with_suffix('.h5')

        backend = mc.backends.HDFBackend(output_file)
        if prog_params.task == 'sample':
            backend.reset(prog_params.nwalkers, dimension)

        mcmc_sampler = mc.EnsembleSampler(
            prog_params.nwalkers,
            dimension,
            likelihood,
            kwargs={'use_prior': prog_params.use_prior},
            backend=backend,
            pool=pool)
    elif prog_params.sampler == 'zeus':
        if prog_params.jump:
            proposal = {
                'differential': 0.85,
                'gaussian': 0.0,
                'jump': 0.15,
                'random': 0.0,
            }
        else:
            proposal = {
                'differential': 1.0,
                'gaussian': 0.0,
                'jump': 0.0,
                'random': 0.0,
            }

        mcmc_sampler = zeus.sampler(
            likelihood,
            prog_params.nwalkers,
            dimension,
            proposal=proposal,
            pool=pool,
            kwargs={'use_prior': prog_params.use_prior})

    # Set up the initial state.
    def _initialise_state():
        if model_constraint:
            _ini_pos = []
            while len(_ini_pos) < prog_params.nwalkers:
                criterion = False
                while not criterion:
                    pos = np.random.uniform(_prior_ranges[:, 0],
                                            _prior_ranges[:, -1])
                    criterion = model_constraint(
                        dict(zip(likelihood.prior.keys(), pos)))
                _ini_pos.append(pos)
        else:
            _ini_pos = np.random.uniform(_prior_ranges[:, 0],
                                         _prior_ranges[:, -1],
                                         size=(prog_params.nwalkers,
                                               dimension))
        return np.asarray(_ini_pos)

    if prog_params.task == "resume":
        initial_state = None
    else:
        initial_state = _initialise_state()

        logger.info(
            "\n---Starting positions (walkers, parameters)---\n%s\n%s...\n",
            pformat(list(likelihood.prior.keys()), width=79,
                    compact=True).lstrip("[").rstrip("]"),
            pformat(
                np.array2string(
                    initial_state[::(prog_params.nwalkers // 10), :],
                    formatter={'float_kind': '{:.2f}'.format
                               })).replace("(", "").replace(")", "").replace(
                                   "' ", "").replace("'",
                                                     "").replace("\\n", ""))

    return mcmc_sampler, initial_state, dimension
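A caller would presumably unpack the returned tuple and drive the sampler itself; a hedged sketch, with nsteps as an illustrative value not taken from the source (both emcee and zeus expose run_mcmc):

mcmc_sampler, initial_state, dimension = initialise_sampler()
nsteps = 5000  # illustrative value
mcmc_sampler.run_mcmc(initial_state, nsteps)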
Example #6
def main(
    completeness, max_mag_syn, obs_clust, ext_coefs, st_dist_mass, N_fc,
    err_pars, m_ini_idx, binar_flag, lkl_method, fundam_params, theor_tracks,
    R_V, nsteps_mcee, nwalkers_mcee, nburn_mcee, priors_mcee, emcee_moves,
        hmax, **kwargs):
    """
    """
    varIdxs, ndim, ranges = varPars(fundam_params)
    # Pack synthetic cluster arguments.
    synthcl_args = [
        theor_tracks, completeness, max_mag_syn, st_dist_mass, R_V, ext_coefs,
        N_fc, err_pars, m_ini_idx, binar_flag]

    # # Start timing.
    # max_secs = hmax * 60. * 60.
    # available_secs = max(30, max_secs)

    ntemps = 1
    pos0 = initPop(
        ranges, varIdxs, lkl_method, obs_clust, fundam_params, synthcl_args,
        ntemps, nwalkers_mcee, 'random', None, None)
    pos0 = pos0[0]

    sampler = zeus.sampler(nwalkers_mcee, ndim, log_posterior, args=[
        priors_mcee, varIdxs, ranges, fundam_params, synthcl_args, lkl_method,
        obs_clust])

    elapsed, start = 0., t.time()
    sampler.run_mcmc(pos0, nsteps_mcee)

    print(sampler.summary)

    maf_steps, prob_mean, tau_autocorr = [], [], []
    map_lkl, map_sol_old = [], [[], -np.inf]

    # We'll track how the average autocorrelation time estimate changes.
    # This will be useful to testing convergence.
    # tau_index, autocorr_vals, old_tau = 0, np.empty(nsteps), np.inf

    # with warnings.catch_warnings():
    #     warnings.simplefilter("ignore")

    #     N_steps_store, runs = 50, 0

    #     elapsed, start = 0., t.time()
    #     milestones = list(range(10, 101, 10))
    #     for i, (pos, prob, stat) in enumerate(
    #             sampler.sample(pos0, iterations=nsteps_mcee)):

    #         # Only check convergence every 'N_steps_store' steps
    #         if (i + 1) % N_steps_store:
    #             continue
    #         runs += 1

    #         # Compute the autocorrelation time so far. Using tol=0 means that
    #         # we'll always get an estimate even if it isn't trustworthy.
    #         tau = sampler.get_autocorr_time(tol=0)
    #         tau_autocorr.append([i, np.mean(tau)])

    #         # # Check convergence
    #         # converged = np.all(tau * N_conv < (i + 1))
    #         # converged &= np.all(np.abs(old_tau - tau) / tau < tol_conv)
    #         # autocorr_vals[tau_index] = np.nanmean(tau)
    #         # tau_index += 1

    #         maf = np.mean(sampler.acceptance_fraction)
    #         maf_steps.append(maf)

    #         # Store MAP solution in this iteration.
    #         prob_mean.append(np.mean(prob))
    #         idx_best = np.argmax(prob)
    #         # Update if a new optimal solution was found.
    #         if prob[idx_best] > map_sol_old[1]:
    #             map_sol_old = [
    #                 fillParams(fundam_params, varIdxs, pos[idx_best]),
    #                 prob[idx_best]]
    #         map_lkl.append(map_sol_old[1])

    #         # Time used to check how fast the sampler is advancing.
    #         elapsed += t.time() - start
    #         start = t.time()
    #         # Print progress.
    #         percentage_complete = (100. * (i + 1) / nsteps_mcee)
    #         if len(milestones) > 0 and percentage_complete >= milestones[0]:
    #             map_sol, logprob = map_sol_old
    #             m, s = divmod(nsteps_mcee / (i / elapsed) - elapsed, 60)
    #             h, m = divmod(m, 60)
    #             print("{:>3}% ({:.3f}) LP={:.1f} ({:.5f}, {:.3f}, {:.3f}, "
    #                   "{:.2f}, {:.0f}, {:.2f})".format(
    #                       milestones[0], maf, logprob, *map_sol) +
    #                   " [{:.0f} m/s | {:.0f}h{:.0f}m]".format(
    #                       (ntemps * nwalkers_mcee * i) / elapsed, h, m))
    #             milestones = milestones[1:]

    #         # Stop when available time is consumed.
    #         if elapsed >= available_secs:
    #             print("  Time consumed")
    #             break

    elapsed += t.time() - start

    # Total number of steps
    # N_steps = N_steps_store * np.arange(1, runs + 1)
    N_steps = 1 * np.arange(1, nsteps_mcee + 1)

    # Mean acceptance fractions for all replicas.
    maf_allT = np.array([])
    # Temperature swaps acceptance fractions.
    tswaps_afs = np.array([])
    # Betas history
    betas_pt = np.array([])

    # Final MAP fit.
    map_sol, map_lkl_final = (0., 0., 0., 0., 0., 0.), 0.  # map_sol_old

    # # This number should be between approximately 0.25 and 0.5 if everything
    # # went as planned.
    # m_accpt_fr = np.mean(sampler.acceptance_fraction)
    # if m_accpt_fr > .5 or m_accpt_fr < .25:
    #     print("  WARNING: mean acceptance fraction is outside of the\n"
    #           "  recommended range.")

    # all_chains.shape = (N_steps_store * runs, nchains, ndims)
    all_chains = sampler.get_chain()
    # Store burn-in chain phase.
    bi_steps = int(nburn_mcee * all_chains.shape[0])
    # chains_nruns.shape: (bi_steps, nchains, ndim)
    chains_nruns = all_chains[:bi_steps]
    # pars_chains_bi.shape: (ndim, nchains, bi_steps)
    pars_chains_bi = chains_nruns.T

    # After burn-in
    chains_nruns = all_chains[bi_steps:]
    pars_chains = chains_nruns.T

    # Convergence parameters.
    tau_autocorr = np.array(tau_autocorr).T
    _, acorr_t, med_at_c, all_taus, geweke_z, acorr_function,\
        mcmc_ess = convergenceVals(
            'ptemcee', ndim, varIdxs, chains_nruns, bi_steps)

    # Re-shape trace for all parameters (flat chain).
    # Shape: (ndim, runs * nchains)
    mcmc_trace = chains_nruns.reshape(-1, ndim).T

    param_r2 = r2Dist(fundam_params, varIdxs, mcmc_trace)
    mode_sol, pardist_kde = modeKDE(fundam_params, varIdxs, mcmc_trace)

    # Mean and median.
    mean_sol = np.mean(mcmc_trace, axis=1)
    median_sol = np.median(mcmc_trace, axis=1)

    # Fill the spaces of the parameters not fitted with their fixed values.
    mean_sol = fillParams(fundam_params, varIdxs, mean_sol)
    mode_sol = fillParams(fundam_params, varIdxs, mode_sol)
    median_sol = fillParams(fundam_params, varIdxs, median_sol)

    # Total number of values used to estimate the parameter's distributions.
    N_total = mcmc_trace.shape[-1]

    isoch_fit_params = {
        'varIdxs': varIdxs, 'mean_sol': mean_sol, 'Tmax': "none",
        'median_sol': median_sol, 'map_sol': map_sol, 'map_lkl': map_lkl,
        'mode_sol': mode_sol, 'pardist_kde': pardist_kde, 'param_r2': param_r2,
        'map_lkl_final': map_lkl_final, 'prob_mean': prob_mean,
        'bf_elapsed': elapsed, 'mcmc_trace': mcmc_trace,
        'pars_chains_bi': pars_chains_bi, 'pars_chains': pars_chains,
        'maf_allT': maf_allT, 'tswaps_afs': tswaps_afs, 'betas_pt': betas_pt,
        #
        'tau_autocorr': tau_autocorr, 'acorr_t': acorr_t, 'med_at_c': med_at_c,
        'all_taus': all_taus, 'acorr_function': acorr_function,
        # 'max_at_c': max_at_c, 'min_at_c': min_at_c,
        # 'minESS': minESS, 'mESS': mESS, 'mESS_epsilon': mESS_epsilon,
        'geweke_z': geweke_z, 'mcmc_ess': mcmc_ess, 'N_total': N_total,
        'N_steps': N_steps
    }

    return isoch_fit_params
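Note that the manual burn-in split above can also be delegated to the sampler: zeus's get_chain accepts discard, thin and flat keyword arguments (mirroring emcee), so sampler.get_chain(discard=bi_steps) returns the post-burn-in chain directly.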
Example #7
    def sample(self, quiet=False):

        if not self._is_setup:

            log.info("You forgot to setup the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # Get starting point

        p0 = self._get_starting_points(self._n_walkers)

        # Deactivate memoization in astromodels; it is useless here because we
        # never evaluate the same set of parameters twice.
        with use_astromodels_memoization(False):

            if using_mpi:

                with MPIPoolExecutor() as executor:

                    sampler = zeus.sampler(
                        logprob_fn=self.get_posterior,
                        nwalkers=self._n_walkers,
                        ndim=n_dim,
                        pool=executor,
                    )

                    # if self._seed is not None:

                    #     sampler._random.seed(self._seed)

                    # Run the true sampling
                    log.debug("Start zeus run")
                    _ = sampler.run(
                        p0,
                        self._n_iterations + self._n_burn_in,
                        progress=loud,
                    )
                    log.debug("Zeus run done")

            elif threeML_config["parallel"]["use_parallel"]:

                c = ParallelClient()
                view = c[:]

                sampler = zeus.sampler(
                    logprob_fn=self.get_posterior,
                    nwalkers=self._n_walkers,
                    ndim=n_dim,
                    pool=view,
                )

            else:

                sampler = zeus.sampler(logprob_fn=self.get_posterior,
                                       nwalkers=self._n_walkers,
                                       ndim=n_dim)

            # If a seed is provided, set the random number seed
            # if self._seed is not None:

            #     sampler._random.seed(self._seed)

            # Sample the burn-in
            if not using_mpi:
                log.debug("Start zeus run")
                _ = sampler.run(p0,
                                self._n_iterations + self._n_burn_in,
                                progress=loud)
                log.debug("Zeus run done")

        self._sampler = sampler
        self._raw_samples = sampler.get_chain(flat=True,
                                              discard=self._n_burn_in)

        # Compute the corresponding values of the likelihood

        # First we need the prior
        log_prior = np.array([self._log_prior(x) for x in self._raw_samples])
        self._log_probability_values = sampler.get_log_prob(
            flat=True, discard=self._n_burn_in)

        # np.array(
        #     [self.get_posterior(x) for x in self._raw_samples]
        # )

        # Now we get the log posterior and we remove the log prior

        self._log_like_values = self._log_probability_values - log_prior

        # we also want to store the log probability

        self._marginal_likelihood = None

        self._build_samples_dictionary()

        self._build_results()

        # Display results
        if loud:
            print(self._sampler.summary)
            self._results.display()

        return self.samples
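Since the run comprises n_iterations + n_burn_in steps and get_chain is called with discard=self._n_burn_in, the flattened _raw_samples array holds n_iterations * n_walkers rows, with one entry of _log_probability_values per row.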
Example #8
# Earlier notebook cells (not shown) define ndim, nwalkers, nsteps and a
# covariance matrix C, and display it, hence the stray colorbar call.
plt.colorbar()

icov = np.linalg.inv(C)

mu = np.random.rand(ndim) * 100.0


def logp(x, mu, icov):
    return -0.5 * np.dot(np.dot((x - mu).T, icov), (x - mu))


start = np.random.rand(ndim)

# In[3]:

sampler = zeus.sampler(logp, nwalkers, ndim, args=[mu, icov])
sampler.run(start, nsteps)

# In[4]:

plt.figure(figsize=(16, 1.5 * ndim))
for n in range(ndim):
    plt.subplot2grid((ndim, 1), (n, 0))
    plt.plot(np.arange(np.shape(sampler.chain)[1]),
             sampler.chain[:, :, n].T,
             alpha=0.5)
    plt.axhline(y=mu[n])
plt.tight_layout()
plt.show()

trace = sampler.flatten()
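As a quick sanity check on the run above, the flattened trace should recover the Gaussian's mean; a minimal sketch:

# The sample mean of the flat chain should approach the true mean mu.
print(np.mean(trace, axis=0) - mu)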