def __init__(self, model, nwalkers, ntemps=None, Tmax=None, betas=None,
             adaptive=False, adaptation_lag=None, adaptation_time=None,
             scale_factor=None, loglikelihood_function=None,
             checkpoint_interval=None, checkpoint_signal=None,
             nprocesses=1, use_mpi=False):
    self.model = model
    ndim = len(model.variable_params)
    # create temperature ladder if needed
    if ntemps is None and Tmax is None and betas is None:
        raise ValueError("must provide either ntemps/Tmax or betas")
    if betas is None:
        betas = ptemcee.make_ladder(ndim, ntemps=ntemps, Tmax=Tmax)
    # construct the keyword arguments to pass; if a kwarg is None, we
    # won't pass it, resulting in ptemcee's defaults being used
    kwargs = {}
    kwargs['adaptive'] = adaptive
    kwargs['betas'] = betas
    if adaptation_lag is not None:
        kwargs['adaptation_lag'] = adaptation_lag
    if adaptation_time is not None:
        kwargs['adaptation_time'] = adaptation_time
    if scale_factor is not None:
        kwargs['scale_factor'] = scale_factor
    # create a wrapper for calling the model
    if loglikelihood_function is None:
        loglikelihood_function = 'loglikelihood'
    # frustratingly, ptemcee does not support blob data, so we have to
    # turn it off
    model_call = models.CallModel(model, loglikelihood_function,
                                  return_all_stats=False)
    # these are used to help parallelize over multiple cores / MPI
    models._global_instance = model_call
    model_call = models._call_global_model
    prior_call = models._call_global_model_logprior
    self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
    # construct the sampler
    self._sampler = ptemcee.Sampler(nwalkers=nwalkers, ndim=ndim,
                                    logl=model_call, logp=prior_call,
                                    mapper=self.pool.map, **kwargs)
    self.nwalkers = nwalkers
    self._ntemps = ntemps
    self._checkpoint_interval = checkpoint_interval
    self._checkpoint_signal = checkpoint_signal
    # we'll initialize ensemble and chain to None
    self._chain = None
    self._ensemble = None
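
# The constructor above mostly wires pieces together: build a beta ladder,
# drop None kwargs so ptemcee's own defaults apply, wrap the model so it can
# be shipped to worker processes, and hand everything to ptemcee.Sampler.
# Below is a minimal standalone sketch of the same wiring with a toy Gaussian
# likelihood and flat prior; the names _demo_ptemcee_setup/_demo_logl/
# _demo_logp and all tuning values are illustrative assumptions, not part of
# this module. Only ptemcee calls already used above are relied on.
def _demo_ptemcee_setup(ntemps=4, nwalkers=32, ndim=2, niter=10):
    import numpy
    import ptemcee

    def _demo_logl(x):
        # unit Gaussian log-likelihood in ndim dimensions
        return -0.5 * numpy.sum(x * x)

    def _demo_logp(x):
        # flat (improper) prior
        return 0.0

    # geometrically spaced ladder from T=1 up to Tmax, as in __init__ above
    betas = ptemcee.make_ladder(ndim, ntemps=ntemps, Tmax=50.0)
    sampler = ptemcee.Sampler(nwalkers=nwalkers, ndim=ndim,
                              logl=_demo_logl, logp=_demo_logp,
                              betas=betas, adaptive=False)
    # one starting ball per temperature: shape (ntemps, nwalkers, ndim)
    p0 = numpy.random.normal(0.0, 1.0, (ntemps, nwalkers, ndim))
    for result in sampler.sample(p0, iterations=niter):
        pass
    return sampler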
def samplePtemcee(t, y, ye, mup, sigp, Tmax, nwalkers=100, nsteps=1000,
                  nburn=None, ntemps=21, sampleFile=None, maxTemp=np.inf):
    ndim = len(mup)
    ndata = len(t)

    if nburn is None:
        nburn = nsteps // 4

    doTheSampling = True
    betas = None

    # try to restore a previous run from the checkpoint file
    if sampleFile is not None:
        with h5.File(sampleFile, "a") as f:
            if ('ptemcee/chain' in f and 'ptemcee/lnprobability' in f
                    and 'ptemcee/lnlikelihood' in f and 'ptemcee/betas' in f):
                chain = f['ptemcee/chain'][...]
                lnprobability = f['ptemcee/lnprobability'][...]
                lnlikelihood = f['ptemcee/lnlikelihood'][...]
                betas = f['ptemcee/betas'][...]
                try:
                    assert chain.shape == (ntemps, nwalkers, nsteps, ndim)
                    assert lnprobability.shape == (ntemps, nwalkers, nsteps)
                    assert lnlikelihood.shape == (ntemps, nwalkers, nsteps)
                    assert betas.shape == (ntemps, )
                    # keep the cold (T=1) chain for the posterior, but the
                    # full ladder of log-likelihoods for the evidence
                    samps = chain[0].reshape((-1, ndim))
                    lnprobs = lnprobability[0].reshape((-1, ))
                    lnlikes = lnlikelihood
                    doTheSampling = False
                except AssertionError:
                    pass

    if doTheSampling:
        if betas is None:
            betas = ptemcee.make_ladder(ndim, ntemps, maxTemp)
        sampler = ptemcee.Sampler(nwalkers, ndim, loglike, logprior,
                                  logl_args=(t, y, ye),
                                  logp_args=(mup, sigp),
                                  betas=betas, adaptive=True)
        # start all walkers in a tight ball around the prior mean
        p0 = mup[None, None, :] + np.random.normal(
            0.0, 1.0e-4, (ntemps, nwalkers, ndim))

        if nburn > 0:
            for i, result in enumerate(sampler.sample(
                    p0, iterations=nburn, storechain=False)):
                print("Burn in {0:d} steps: {1:.1f}%".format(
                    nburn, 100 * (i + 1) / nburn), end='\r')
            print('')
            sampler.reset()
        else:
            result = (p0, )

        for i, result in enumerate(sampler.sample(
                *result, iterations=nsteps, storechain=True)):
            print("Sampling {0:d} steps: {1:.1f}%".format(
                nsteps, 100 * (i + 1) / nsteps), end='\r')
        print('')

        chain = sampler.chain
        samps = sampler.flatchain[0]
        lnprobs = sampler.lnprobability[0].reshape((-1, ))
        lnlikes = sampler.lnlikelihood
        betas = sampler.betas

        # write (or overwrite) the checkpoint datasets
        if sampleFile is not None:
            f = h5.File(sampleFile, 'a')
            if 'ptemcee/chain' in f:
                f['ptemcee/chain'].resize(sampler.chain.shape)
                f['ptemcee/chain'][...] = sampler.chain[...]
            else:
                f.create_dataset('ptemcee/chain', data=sampler.chain,
                                 maxshape=(None, None, None, None))
            if 'ptemcee/lnprobability' in f:
                f['ptemcee/lnprobability'].resize(sampler.lnprobability.shape)
                f['ptemcee/lnprobability'][...] = sampler.lnprobability[...]
            else:
                f.create_dataset('ptemcee/lnprobability',
                                 data=sampler.lnprobability,
                                 maxshape=(None, None, None))
            if 'ptemcee/lnlikelihood' in f:
                f['ptemcee/lnlikelihood'].resize(sampler.lnlikelihood.shape)
                f['ptemcee/lnlikelihood'][...] = sampler.lnlikelihood[...]
            else:
                f.create_dataset('ptemcee/lnlikelihood',
                                 data=sampler.lnlikelihood,
                                 maxshape=(None, None, None))
            if 'ptemcee/betas' in f:
                f['ptemcee/betas'].resize(betas.shape)
                f['ptemcee/betas'][...] = betas[...]
            else:
                f.create_dataset('ptemcee/betas', data=betas,
                                 maxshape=(None, ))
            f.close()

    labels = ['C{0:01d}'.format(i) for i in range(ndim)]

    fig = corner.corner(samps, labels=labels)
    figname = "emceePT_corner.png"
    print("Saving", figname)
    fig.savefig(figname)
    plt.close(fig)

    # trace plots for the coldest and hottest temperatures only
    # for k in range(ntemps):
    for k in [0, ntemps - 1]:
        for i in range(ndim):
            fig, ax = plt.subplots(1, 1, figsize=(8, 4))
            for j in range(nwalkers):
                ax.plot(chain[k, j, :, i], alpha=2.0 / nwalkers, color='k')
            ax.set_xlabel('# Iterations')
            ax.set_ylabel(labels[i])
            figname = "emceePT_trace_T{0:01d}_{1:s}.png".format(k, labels[i])
            print("Saving", figname)
            fig.savefig(figname)
            plt.close(fig)

    imap = lnprobs.argmax()

    taus = autocorr.integrated_time(chain, timeAxis=2, walkerAxis=1)
    lnlike_taus = autocorr.integrated_time(lnlikes, timeAxis=2, walkerAxis=1)
    print("emceePT AutoCorrTau:", taus)
    print("emceePT AutoCorrTau logLike:", lnlike_taus)

    # per-temperature variance of ln(L), used for the error estimate below
    lnlike_adj = lnlikes - lnlikes.mean(axis=(1, 2), keepdims=True)
    lnlike_var = (lnlike_adj * lnlike_adj).mean(axis=(1, 2))

    xmap = samps[imap]
    means = samps.mean(axis=0)
    diffs = samps - means
    cov = (diffs[:, :, None] * diffs[:, None, :]).mean(axis=0)

    # arrange betas in ascending order for thermodynamic integration and
    # extend the ladder down to beta = 0 if necessary
    avglnl = lnlikes.mean(axis=(1, 2))[::-1]
    avglnl_err = np.sqrt(lnlike_taus / (nsteps * nwalkers) * lnlike_var)[::-1]
    betas = betas[::-1]

    if betas[0] > 0.0:
        betas = np.concatenate(([0.0], betas))
        avglnl = np.concatenate(([avglnl[0]], avglnl))
        avglnl_err = np.concatenate(([avglnl_err[0]], avglnl_err))

    return xmap, means, cov, samps, lnprobs, avglnl, betas, avglnl_err
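
# samplePtemcee returns the mean log-likelihood per temperature (avglnl) on
# an ascending beta grid padded down to beta = 0, which is exactly what
# thermodynamic integration needs: ln Z = integral from 0 to 1 of
# <ln L>_beta dbeta. A minimal sketch of that final quadrature step follows;
# the helper name logEvidence, the trapezoid rule, and the independent-error
# assumption are choices made here, not part of the function above (ptemcee
# also ships its own evidence estimator).
def logEvidence(betas, avglnl, avglnl_err):
    """Trapezoidal thermodynamic-integration estimate of ln(evidence).

    betas      : ascending inverse temperatures, starting at 0.0
    avglnl     : <ln L>_beta at each beta, e.g. avglnl from samplePtemcee
    avglnl_err : Monte Carlo error on each <ln L>_beta
    """
    dbeta = np.diff(betas)
    # trapezoid rule over the temperature ladder
    lnZ = np.sum(0.5 * dbeta * (avglnl[1:] + avglnl[:-1]))
    # propagate the (assumed independent) per-beta errors through the
    # same trapezoid weights
    w = np.zeros_like(betas)
    w[:-1] += 0.5 * dbeta
    w[1:] += 0.5 * dbeta
    lnZ_err = np.sqrt(np.sum((w * avglnl_err) ** 2))
    return lnZ, lnZ_err

# e.g.:
# xmap, means, cov, samps, lnprobs, avglnl, betas, avglnl_err = \
#     samplePtemcee(t, y, ye, mup, sigp, Tmax)
# lnZ, lnZ_err = logEvidence(betas, avglnl, avglnl_err)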