Example #1
    def _sample_L(self, L, u):
        """
        Sample the next L parameter given the previous setting. We use the
        standard Metropolis-Hastings implementation in `emcee` to get the next
        sample.

        L: The previous setting of the L parameter.
        u: The current setting of the u parameter.

        Returns a tuple consisting of the newly sampled L parameter and its
        posterior probability.
        """
        Nu = self.Nu
        data = self.data

        def log_L_prob(Lp):
            Lpm = np.zeros(L.shape)
            Lpm[np.tril_indices(L.shape[0])] = Lp
            log_prior = np.sum(-0.5 * Lp**2 / self.parameters['L_PRIOR_VAR'])

            return self._log_data_likelihood(u, Lpm) + log_prior

        # Number of free parameters: the lower-triangular entries of L.
        dim = int((L.shape[0]**2 + L.shape[0]) / 2)
        scale = self.parameters['MH_L_SCALE']
        sampler = emcee.MHSampler(np.eye(dim) * scale,
                                  dim=dim,
                                  lnprobfn=log_L_prob)
        Lp, _, _ = sampler.run_mcmc(L[np.tril_indices(L.shape[0])], 1)
        Lpm = np.zeros(L.shape)
        Lpm[np.tril_indices(L.shape[0])] = Lp

        return Lpm, log_L_prob(Lp)
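
For reference, MHSampler ships only with emcee 2.x (it was removed in emcee 3); it takes a Gaussian proposal covariance, the dimension, and a log-probability function. A minimal standalone sketch of the same one-step update pattern, with a toy standard-normal target standing in for the model's conditional posterior:

import numpy as np
import emcee  # requires emcee < 3, where MHSampler still exists

def log_prob(x):
    # Standard-normal log-density (up to a constant); stands in for the
    # conditional posterior that _sample_L evaluates.
    return -0.5 * np.sum(x**2)

dim = 3
sampler = emcee.MHSampler(np.eye(dim) * 0.1, dim=dim, lnprobfn=log_prob)
# One Metropolis-Hastings step from the current state, as in _sample_L.
x_new, _, _ = sampler.run_mcmc(np.zeros(dim), 1)
print(x_new)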
Example #2
def _worker(args):
    i, outfn, nsteps = args

    pid = os.getpid()
    # One RNG per worker process, seeded from the pid and wall time.
    _random = _rngs.get(pid,
                        np.random.RandomState(int(int(pid) + time.time())))
    _rngs[pid] = _random

    # Draw a random problem dimension in [1, 128] and a matching walker count.
    ndim = int(np.ceil(2**(7 * _random.rand())))
    nwalkers = 2 * ndim + 2
    # nwalkers += nwalkers % 2
    print(ndim, nwalkers)

    cov = random_cov(ndim)
    icov = np.linalg.inv(cov)

    ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov])
    ens_samp.random_state = _random.get_state()
    pos, lnprob, state = ens_samp.run_mcmc(
        np.random.randn(nwalkers * ndim).reshape([nwalkers, ndim]), nsteps)

    proposal = np.diag(cov.diagonal())
    mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn, args=[icov])
    mh_samp.random_state = state
    mh_samp.run_mcmc(np.random.randn(ndim), nsteps)

    f = h5py.File(outfn, "a")  # explicit mode; required by modern h5py
    f["data"][i, :] = np.array(
        [ndim, np.mean(ens_samp.acor),
         np.mean(mh_samp.acor)])
    f.close()
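
The worker assumes outfn already contains a dataset named "data" with one row per task. A hypothetical setup sketch (the filename and task count are assumptions, not part of the original):

import h5py
import numpy as np

ntasks = 16  # hypothetical number of worker tasks
with h5py.File("acor_test.h5", "w") as f:  # hypothetical filename
    # One row per task: [ndim, ensemble acor, MH acor].
    f.create_dataset("data", shape=(ntasks, 3), dtype=np.float64)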
Example #3
    def _sample_logtau(self, logtau, u, L):
        """
        Sample the next log(tau) parameter given a previous setting. We use the
        standard Metropolis-Hastings implementation in `emcee` to get the next
        sample.

        logtau: The previous setting of the log(tau) parameters.
        u: The current setting of u.
        L: The current setting of L.

        Returns a tuple consisting of the newly sampled log(tau) parameter and
        its posterior probability.
        """
        Nu = self.Nu
        data = self.data
        T = data.shape[1]

        def log_logtau_prob(logtaup):
            K = self._construct_kernel(np.exp(logtaup), range(T))
            Kinv = np.linalg.inv(K)
            # Gaussian-process prior on u: zero mean, covariance K.
            log_u_prob = -0.5 * np.matmul(u, np.matmul(Kinv, u))
            mean = self.parameters['TAU_PRIOR_MEAN']
            var = self.parameters['TAU_PRIOR_VAR']
            log_prior = np.sum(-0.5 * ((logtaup - mean)**2 / var))

            return log_u_prob + log_prior

        dim = np.prod(logtau.shape)
        sampler = emcee.MHSampler(np.eye(dim),
                                  dim=dim,
                                  lnprobfn=log_logtau_prob)
        logtaup, _, _ = sampler.run_mcmc(logtau, 1)

        return logtaup, log_logtau_prob(logtaup)
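
self._construct_kernel is not shown in this snippet. A hypothetical squared-exponential version, purely for illustration (the real kernel, and whether tau is scalar or per-dimension, may differ):

import numpy as np

def construct_kernel(tau, ts, jitter=1e-6):
    # Squared-exponential kernel over the time indices in ts with length
    # scale tau; the jitter keeps K well conditioned for np.linalg.inv.
    ts = np.asarray(ts, dtype=float)
    sq_dists = (ts[:, None] - ts[None, :])**2
    return np.exp(-0.5 * sq_dists / tau**2) + jitter * np.eye(len(ts))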
Example #4
    def get_emcee_sampler(self, xarr, data, error, **kwargs):
        """
        Get an emcee sampler for the data & model

        Parameters
        ----------
        xarr : pyspeckit.units.SpectroscopicAxis
        data : np.ndarray
        error : np.ndarray

        Examples
        --------

        >>> import pyspeckit
        >>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
        >>> e = np.random.randn(50)
        >>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
        >>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
        >>> sp.specfit(fittype='gaussian')
        >>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
        >>> p0 = sp.specfit.parinfo
        >>> emcee_sampler.run_mcmc(p0,100)
        """
        try:
            import emcee
        except ImportError:
            return

        def probfunc(pars):
            return self.logp(xarr, data, error, pars=pars)

        raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
        # Unreachable; retained from the original implementation sketch.
        sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)

        return sampler
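
Since the Metropolis-Hastings path is deliberately unimplemented here, a hypothetical sketch of wiring the same kind of probfunc into emcee's EnsembleSampler instead (a generic illustration, not pyspeckit's actual API):

import numpy as np
import emcee

def probfunc(pars):
    # Stand-in log-probability; pyspeckit would call self.logp(...) here.
    return -0.5 * np.sum(np.asarray(pars)**2)

npars, nwalkers = 3, 10
sampler = emcee.EnsembleSampler(nwalkers, npars, probfunc)
p0 = 1e-3 * np.random.randn(nwalkers, npars)
sampler.run_mcmc(p0, 100)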
Example #5
def build_mixture(means, covs):
    m = Mixture(means, covs)
    mixes.append(m)
    # The Mixture instance itself is used as the log-probability callable.
    samplers.append(emcee.EnsembleSampler(nwalkers, ndim, m))
    mh_samps.append(emcee.MHSampler(0.005 * np.array([[1, 0], [0, 1]]),
                                    ndim, m))
    ics.append(np.array([4 * np.random.rand(2) - 2 for n in range(nwalkers)]))
    mh_ics.append(means[np.random.randint(len(means))])
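
Mixture is defined elsewhere in the module; since emcee calls the instance directly as the log-probability, a hypothetical minimal version could look like this (an assumption, not the original class):

import numpy as np

class Mixture(object):
    # Equal-weight Gaussian mixture; __call__ returns the log-density
    # (up to a constant) so the instance can serve as emcee's lnprobfn.
    def __init__(self, means, covs):
        self.means = [np.asarray(m) for m in means]
        self.covs = [np.asarray(c) for c in covs]

    def __call__(self, x):
        logps = []
        for m, c in zip(self.means, self.covs):
            diff = x - m
            _, logdet = np.linalg.slogdet(c)
            logps.append(-0.5 * (diff @ np.linalg.inv(c) @ diff + logdet))
        return np.logaddexp.reduce(logps) - np.log(len(logps))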
Example #6
    def _run_(self, nsteps, covariance=None, burnin=0):
        import emcee
        self.set_covariance(covariance)
        self.check_chain()
        sampler = emcee.MHSampler(self.fitted_covariance, self.nfitted,
                                  self.lnposteriorargs, kwargs=self.fixed_values)
        self.logger.info('Running MCMC for {:d} steps...'.format(nsteps))
        sampler.run_mcmc(self.first[0], nsteps, rstate0=self.rng.get_state())
        self.logger.info('Done.')
        # Pad with a leading walker axis so ensemble-oriented code can read it.
        sampler._chain = sampler.chain[None, ...]
        sampler._lnprob = sampler._lnprob[None, ...]
        self.set_from_sampler(sampler)
        self.logger.info('Mean acceptance fraction of {:.4f} ({:.4f}/{:d}).'.format(
            scipy.mean(self.acceptance_fraction), scipy.mean(self.naccepted),
            self.niterations))
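
The [None, ...] indexing above pads the single MH chain with a leading walker axis, so code written for EnsembleSampler's (nwalkers, nsteps, ndim) chains can consume it unchanged. A quick shape check:

import numpy as np

chain = np.zeros((1000, 4))       # MHSampler chain: (nsteps, ndim)
ensemble_like = chain[None, ...]  # one pseudo-walker: (1, nsteps, ndim)
print(ensemble_like.shape)        # (1, 1000, 4)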
Example #7
import numpy as np
import emcee

def lnprob(x, mu, icov):
    # Multivariate-Gaussian log-density, up to an additive constant.
    # (Head reconstructed: the snippet originally started mid-function.)
    diff = x - mu
    lnp = -0.5 * np.dot(diff, np.dot(icov, diff))
    return lnp


ndim = 2

#Create our own parameters for this Gaussian
means = np.array([10, 3])
cov = np.array([[3.0, 0.0], [0.0, 1.0]])
icov = np.linalg.inv(cov)

print("Inverse covariance matrix", icov)

#Jump distribution parameters
MH_cov = np.array([[1.5, 0], [0., 0.7]])

sampler = emcee.MHSampler(MH_cov, ndim, lnprob, args=[means, icov])

pos, prob, state = sampler.run_mcmc(np.array([0, 0]), 5)
print("Samples", sampler.flatchain)
# sampler.reset()

# sampler.run_mcmc(pos, 5)

print("Acceptance fraction", sampler.acceptance_fraction)
#
# import triangle
# import matplotlib.pyplot as plt
#
# samples = sampler.flatchain
# figure = triangle.corner(samples, labels=(r"$\mu_1$", r"$\mu_2$"), quantiles=[0.16, 0.5, 0.84],
#                          show_titles=True, title_args={"fontsize": 12})
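
The commented-out plot uses triangle, which has since been renamed corner (with title_args becoming title_kwargs). An updated sketch continuing from the sampler above, assuming corner is installed:

import corner
import matplotlib.pyplot as plt

samples = sampler.flatchain
figure = corner.corner(samples, labels=(r"$\mu_1$", r"$\mu_2$"),
                       quantiles=[0.16, 0.5, 0.84], show_titles=True,
                       title_kwargs={"fontsize": 12})
plt.show()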
Example #8
    def lnprob(self, theta, x, y, yerr):
        # (Head reconstructed: the snippet originally started mid-method.
        # `log_prior` is a hypothetical name for the prior evaluated here.)
        lp = self.log_prior(theta)
        if not np.isfinite(lp):
            return -np.inf

        return lp + self.log_like(x, y, yerr, theta)  # loglikechain[0]

MCMC = MCMCSetup()

# Initialize the MCMC from a random point drawn from the prior
Teffinitial = np.exp(np.random.uniform(np.log(thetashape[0][0]),
                                       np.log(thetashape[0][1])))
logfacinitial = np.random.uniform(thetashape[1][0], thetashape[1][1])
thetachain = np.array([[Teffinitial, logfacinitial]])

# Only pos[0] is used below: MHSampler runs a single chain, not an ensemble.
ndim, nwalkers = 2, 100
pos = [[Teffinitial, logfacinitial] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.MHSampler(cov, dim=ndim, lnprobfn=MCMC.lnprob, args=(x, y, yerr))

# Run the production chain.
print("Running MCMC...")
sampler.run_mcmc(pos[0], 5000, rstate0=np.random.get_state())
print("Done.")

# Make the triangle plot.
burnin = 500
samples = sampler.chain[burnin:, :]  # no .reshape((-1, 2)) needed: chain is (niter, ndim)

# Compute the quantiles.
# samples = np.exp(samples)  # optional back-transform, disabled in the original
T_mcmc, logfac_mcmc = list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                             zip(*np.percentile(samples, [16, 50, 84],
                                                axis=0))))
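
The final expression maps per-parameter [16, 50, 84] percentiles to (median, upper error, lower error) triples. A small self-contained check of that pattern on stand-in samples:

import numpy as np

samples = np.random.randn(4500, 2)  # stand-in chain of shape (nsamples, ndim)
summary = [(v[1], v[2] - v[1], v[1] - v[0])
           for v in zip(*np.percentile(samples, [16, 50, 84], axis=0))]
print(summary)  # one (median, +err, -err) tuple per parameter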
Example #9
    def run_chain(self, t0=None, niter=None, burnin=None):

        if niter is not None:
            self.niter = niter
        if burnin is not None:
            if burnin < 1.0:
                # Interpret burnin as a fraction of the requested iterations.
                self.burnin = int(burnin * niter)
                self.allsamples = self.niter
            else:
                self.burnin = int(burnin)
                self.allsamples = int(burnin) + self.niter

        if t0 is None:
            t0 = self.pdist(self.popt, self.cov)

        if self.emcee:
            ndim = len(t0)
            print("ndim: " + str(ndim))
            sampler = emcee.MHSampler(self.cov,
                                      dim=ndim,
                                      lnprobfn=self.lpost,
                                      args=[False])
            pos, prob, state = sampler.run_mcmc(t0, self.burnin)
            sampler.reset()

            sampler.run_mcmc(pos, self.niter, rstate0=state)

            self.chain = sampler.chain
            self.lnprobability = sampler.lnprobability
            self.accept = sampler.acceptance_fraction * self.niter

        else:

            accept = 0
            ### set up array
            ttemp, logp = [], []
            ttemp.append(t0)
            #lpost = posterior.PerPosterior(self.ps, self.func)
            logp.append(self.lpost(t0, neg=False))

            for t in np.arange(self.allsamples - 1) + 1:

                tprop = self.pdist(ttemp[t - 1], self.cov)

                pprop = self.lpost(tprop, neg=False)

                logr = pprop - logp[t - 1]
                logr = min(logr, 0.0)
                r = np.exp(logr)
                update = np.random.choice([True, False], p=[r, 1.0 - r])

                if update:
                    ttemp.append(tprop)
                    logp.append(pprop)
                    if t > self.burnin:
                        accept = accept + 1
                else:
                    ttemp.append(ttemp[t - 1])
                    logp.append(logp[t - 1])

            self.chain = ttemp[self.burnin + 1:]
            self.lnprobability = logp[self.burnin + 1:]
            self.accept = accept
        return
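
The manual branch above is a textbook Metropolis accept/reject step. The rule in isolation, for reference (a sketch, not part of the original class):

import numpy as np

def mh_accept(logp_proposal, logp_current, rng=np.random):
    # Accept with probability min(1, p_prop / p_curr), computed in log
    # space to avoid overflow, exactly as the loop above does.
    log_r = min(logp_proposal - logp_current, 0.0)
    return rng.rand() < np.exp(log_r)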
Example #10
if __name__ == "__main__":
    time0 = time.time()
    ndim = 2
    cov_in = np.array([[1.0, 0.0], [0.0, 1.0]])  # np.matrix is deprecated
    multiple_logical = input("Multiple chains (y/n)?")
    if (multiple_logical == 'Y' or multiple_logical == 'y'):
        num_chains = 4
    elif (multiple_logical == 'N' or multiple_logical == 'n'):
        num_chains = 1
    else:
        raise SystemExit('Input not recognised')

    for ichain in range(0, num_chains):
        sampler = emcee.MHSampler(cov_in, ndim, ln_prob)

        if num_chains > 1:
            file_name = "chain_{0:1d}.dat".format(ichain)
            random_x = random.uniform(-500.0, 500.0)
            random_y = random.uniform(-500.0, 500.0)
            p0 = (random_x, random_y)
        else:
            file_name = "chain_long.dat"
            p0 = (-100.0, 100.0)

        f = open(file_name, "w")
        f.close()
        for result in sampler.sample(p0, iterations=20000, storechain=False):
            position = result[0]
            chi2_out = result[1]
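            # Hypothetical continuation (the original snippet is truncated
            # here): append each step to the chain file as "x y lnprob".
            with open(file_name, "a") as f:
                f.write("{0:e} {1:e} {2:e}\n".format(
                    position[0], position[1], chi2_out))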
Example #11
    # (Head reconstructed from the parallel block below: the snippet was
    # truncated mid-call while plotting the histogram of simulated maxima.)
    hist_maxs, edges_maxs = histogram(simulated_maxs, normed=True)
    lower_maxs = np.resize(edges_maxs, len(edges_maxs) - 1)
    bar(lower_maxs,
        hist_maxs,
        width=np.diff(lower_maxs)[0],
        color='r',
        alpha=0.5)

    hist_mins, edges_mins = histogram(simulated_mins, normed=True)
    lower_mins = np.resize(edges_mins, len(edges_mins) - 1)
    bar(lower_mins,
        hist_mins,
        width=np.diff(lower_mins)[0],
        color='b',
        alpha=0.5)

    # Draw realized data's statistic
    axvline(x=np.mean(detections), linewidth=2, color='g')
    axvline(x=np.min(detections), linewidth=2, color='b')
    axvline(x=np.max(detections), linewidth=2, color='r')

    # Draw 5% & 95% borders
    axvline(x=percent(simulated_means, proc=5.0), color='g')
    axvline(x=percent(simulated_means, proc=95.0), color='g')
    axvline(x=percent(simulated_maxs, proc=5.0), color='r')
    axvline(x=percent(simulated_maxs, proc=95.0), color='r')
    axvline(x=percent(simulated_mins, proc=5.0), color='b')
    axvline(x=percent(simulated_mins, proc=95.0), color='b')

    # Using MH MCMC
    p0 = [0.5]
    sampler_mh = emcee.MHSampler(cov=[[0.05]], dim=1, lnprobfn=lnpost)
    # Drive the sampler to completion; samples accumulate on the object.
    for results in sampler_mh.sample(p0, iterations=1000):
        pass
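    # Follow-up (not in the original): read the stored chain back off the
    # sampler; MHSampler.flatchain has shape (iterations, dim).
    samples_mh = sampler_mh.flatchain[:, 0]
    print(samples_mh.mean(), sampler_mh.acceptance_fraction)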