def fit_polynomial_p(self, degree):
        """Fit a polynomial of the given degree (plus a noise term) with a PTSampler."""
        name = 'p{}'.format(degree)
        self.stored_data[name] = {}
        ndim = degree + 2  # degree + 1 coefficients, plus the noise scale sigma
        sampler = PTSampler(
            self.ntemps,
            self.nwalkers,
            ndim,
            self.logl_polynomial_p,
            self.logp_polynomial_p,
            loglargs=[self.x, self.y],
            betas=self.betas)
        param_keys = ["b{}".format(j) for j in range(0, degree + 1)]
        param_keys.append('sigma')
        p0 = [[[
            np.random.uniform(*self.get_unif_prior_lims(key))
            for key in param_keys
        ] for i in range(self.nwalkers)] for j in range(self.ntemps)]

        if self.nburn0 != 0:
            out = sampler.run_mcmc(p0, self.nburn0)
            self.stored_data[name]['chains0'] = sampler.chain[0, :, :, :]
            p0 = self.get_new_p0(sampler, ndim)
            sampler.reset()
        else:
            self.stored_data[name]['chains0'] = None

        out = sampler.run_mcmc(p0, self.nburn + self.nprod)
        self.stored_data[name]['chains'] = sampler.chain[0, :, :, :]

        self.stored_data[name]['sampler'] = sampler
        samples = sampler.chain[0, :, self.nburn:, :].reshape((-1, ndim))
        self.stored_data[name]['samples'] = samples
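
For context, a minimal sketch of the likelihood/prior pair this method expects. The names match the attributes above, but the bodies and the box limits (which stand in for whatever get_unif_prior_lims returns) are assumptions, not the original implementation:

def logl_polynomial_p(theta, x, y):
    # theta = [b0, ..., b_degree, sigma]: coefficients plus noise scale
    *coeffs, sigma = theta
    model = np.polyval(coeffs[::-1], x)  # np.polyval wants highest degree first
    return -0.5 * np.sum((y - model)**2 / sigma**2 + np.log(2*np.pi*sigma**2))

def logp_polynomial_p(theta):
    # Flat prior inside a hypothetical box, -inf outside
    if np.all(np.abs(theta[:-1]) < 10.0) and 0.0 < theta[-1] < 10.0:
        return 0.0
    return -np.inf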
# Number of iterations (after the burn-in)
niter = 1500000
num_files = 50  # niter must be divisible by num_files

nperfile = niter // num_files  # integer number of steps per output file
thin = 1
# Number of burn-in iterations
nburn = 50000

if __name__ == '__main__':
    print('Beginning main function')
    startTime = datetime.now()
    sampler = PTSampler(ntemps, nwalkers, ndim, logl, logp,
                        loglargs=[eps, x, y, sigx, sigy],
                        logpargs=[eps, x, y, sigx, sigy], threads=32)
    # Burn-in run
    sampler.run_mcmc(p0, nburn, thin=thin)
    for index in range(num_files):
        # Restart each chunk from the final positions of the previous run
        p = sampler.chain[:, :, -1, :]
        if index == 0:
            np.savez_compressed(savename + '_burn_' + str(index),
                                af=sampler.acceptance_fraction[0],
                                chain=sampler.chain[0],
                                lnp=sampler.lnprobability[0],
                                p=p)
            print('Burn in complete')
        sampler.reset()
        sampler.run_mcmc(p, nperfile, thin=thin)
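
As excerpted, the loop never writes the production chunks. Assuming each pass were saved with the same savename pattern as the burn file (my assumption, not shown in the original), the T=0 chains could be reassembled like this:

# Hypothetical reassembly of per-chunk T=0 chains along the step axis.
chunks = []
for index in range(num_files):
    with np.load(savename + '_' + str(index) + '.npz') as data:
        chunks.append(data['chain'])         # (nwalkers, nperfile, ndim)
full_chain = np.concatenate(chunks, axis=1)  # (nwalkers, niter, ndim)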
Example #3
# The first `burnin` iterations are discarded as burn-in.
burnin = 500

# Initial spot parameters:
# [spotx, spoty, spotradius, spotcontrast]
spot = numpy.array([0.204, 0.376, 0.096, 0.524])
# Create a 3D matrix of initial states for each temperature and walker.
p0 = numpy.repeat(spot[:, numpy.newaxis].T, ntemps*nwalkers, axis=0).reshape(ntemps, nwalkers, ndim)
# Randomize the initial states in a small neighborhood.
p0 += numpy.random.normal(scale=1e-3, size=p0.shape)

# Initialize sampler.
sampler = PTSampler(ntemps, nwalkers, ndim, logl, logp)

# Run sampler.
pos, prob, state = sampler.run_mcmc(p0, niter)

# Take a view of the T=0 chain.
zerotemp = sampler.chain[0]

# We take iterations at T=0 after burn-in as the equilibrium
# distribution. With 100 walkers, this is 1e4 points.
eq = zerotemp[:, burnin:, :].reshape([nwalkers*(niter-burnin), ndim])

# Plot the distribution of every possible pair of parameters.
labels = ["spotx", "spoty", "spotradius", "spotcontrast"]
for ploti in range(ndim-1):
    for plotj in range(ploti+1, ndim):
        pyplot.figure()
        pyplot.plot(eq[:, ploti], eq[:, plotj], "b.")
        pyplot.xlabel(labels[ploti])
        pyplot.ylabel(labels[plotj])
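
Since the sampler is parallel-tempered, the same run can also yield a model evidence estimate; a brief sketch, assuming the emcee 2.x PTSampler API (the burn-in fraction here is an arbitrary choice):

# Thermodynamic integration over the temperature ladder.
lnZ, dlnZ = sampler.thermodynamic_integration_log_evidence(fburnin=0.25)
print("ln(evidence) = {:.2f} +/- {:.2f}".format(lnZ, dlnZ))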
Example #4
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlikeZ,
                                            args=(Geom, wl_obs, f_obs, f_err,
                                                  gauss_width, scaling, shift,
                                                  nspec),
                                            pool=pool)
        elif parameters == ['NH', 'Vmax', 'Z', 'theta']:
            pos = [[NH[i], V[i], Z[i], TH[i]] for i in range(nwalkers)]
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlikeZTH,
                                            args=(Geom, wl_obs, f_obs, f_err,
                                                  gauss_width, scaling, shift,
                                                  nspec),
                                            pool=pool)
        elif parameters == ['NH', 'Vmax', 'theta']:
            pos = [[NH[i], V[i], TH[i]] for i in range(nwalkers)]
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlikeTH,
                                            args=(Geom, wl_obs, f_obs, f_err,
                                                  gauss_width, scaling, shift,
                                                  nspec),
                                            pool=pool)
        else:
            pos = [[NH[i], V[i]] for i in range(nwalkers)]
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lneqnlike,
                                            args=(Geom, wl_obs, f_obs, f_err,
                                                  scaling, shift, nspec),
                                            pool=pool)

    print('starting burn-in iterations')
    sys.stdout.flush()
    pos0, prob, state = sampler.run_mcmc(pos, 5)
    print('done')
    sys.stdout.flush()

    sampler.reset()

    #f = open("chain.dat", "w")
    #f.close()

    #for result in sampler.sample(pos_end, iterations=10, storechain=False):
    #    position = result[0]
    #    f = open("chain.dat", "a")
    #    for k in range(position.shape[0]):
    #        f.write("{0:4d} {1:s}\n".format(k, " ".join(position[k])))
    #    f.close()
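
The pool handed to EnsembleSampler above is created earlier in the script; a minimal sketch of the usual multiprocessing setup (the worker count is an arbitrary assumption):

from multiprocessing import Pool

pool = Pool(processes=4)  # arbitrary worker count
# ... build the sampler with pool=pool and run it, as in the branches above ...
pool.close()
pool.join()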
Example #5
    return 0.0      

# Training Parameters

sigma = 1e-6
ntemp = 10
nruns = 2000
Temp_i = 0
# Initialize the chain near (a, b, c) = (0, 0, 0.5)/10, tiled five times,
# plus a small random perturbation per temperature and walker.
pos = np.tile((0, 0, 0.5), 5)/10 + 1e-4*np.random.randn(ntemp, 64, 15)

ntemps, nwalkers, ndim = pos.shape

# First MCMC chain
sampler = PTSampler(ntemps, nwalkers, ndim, log_probability, logp,
                    loglargs=(x, JV_exp, sigma))
sampler.run_mcmc(pos, nruns)
samples = sampler.chain
#%%
# Use the final positions of the first chain to update the initial estimate.
pos_update = samples[:, :, -1, :] + 1e-5*np.random.randn(ntemp, 64, 15)
sampler.reset()
# Second MCMC chain
sampler = PTSampler(ntemps, nwalkers, ndim, log_probability, logp,
                    loglargs=(x, JV_exp, sigma))
sampler.run_mcmc(pos_update, nruns)
flat_samples = sampler.flatchain
zero_flat_samples = flat_samples[Temp_i, :, :]
zero_samples = samples[Temp_i, :, :, :]

# Visualize the trace of the first parameter for walker 0.
plt.figure()
plt.plot(zero_samples[0, :, 0])
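
A possible follow-up: summarize the T=0 posterior from the second chain with 16/50/84 percentiles. The labels below follow the (a, b, c) naming in the initialization comment and are my guess at the column ordering:

for i, name in enumerate(['a', 'b', 'c']):
    lo, med, hi = np.percentile(zero_flat_samples[:, i], [16, 50, 84])
    print('{} = {:.4f} (+{:.4f} / -{:.4f})'.format(name, med, hi - med, med - lo))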
Example #6
init_pos = np.tile(init_theta, (ntemps, nwalkers, 1)) + noise_model

priors = [prior_z, prior_vp]

sampler = PTSampler(ntemps, nwalkers, ndim, logl=loglike, logp=logprior,
                    logpargs=(prior_z, prior_vp, nLayers),
                    loglargs=(x, tp, sigma_inv, log_sigma_det, nLayers),
                    threads=6)
sampler.run_mcmc(init_pos, 6500)

# Keep only the T=0 (beta = 1) chain, flattened over walkers and steps.
samples = sampler.chain[0, ...].reshape((-1, ndim))

samples_info = [samples, sampler.chain, sampler.betas, sampler.lnlikelihood,
                sampler.nswap, sampler.nswap_accepted,
                sampler.acceptance_fraction]
with open("samplerMCMC_PT_emcee.p", "wb") as f:
    pickle.dump(samples_info, f)

import corner
fig = corner.corner(samples[5000:, :], labels=["V1", "V2", "V3", "Z1", "Z2"],
                    truths=[3100, 4470, 6200, 2000, 2000])
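To reuse the pickled results later, unpack the list in the same order it was saved (a small sketch):

import pickle

with open("samplerMCMC_PT_emcee.p", "rb") as f:
    (samples, chain, betas, lnlikelihood,
     nswap, nswap_accepted, acceptance_fraction) = pickle.load(f)
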
Example #8
import sys
import time
from multiprocessing import Pool

import numpy as np
from emcee import PTSampler, autocorr


def run_emcee(logl,
              logp,
              p0func,
              ntemps=0,
              nwalkers=50,
              nsamples=2500,
              thin=1,
              minlogbeta=None,
              nupdates=10,
              threads=1,
              outfilename=None,
              saveall=True,
              **kwargs):
    if minlogbeta is None:
        if ntemps == 0:
            # Use a hand-tuned ladder: densely spaced near beta = 1,
            # progressively sparser towards high temperature.
            betas = np.concatenate(
                (np.linspace(0, -0.9375, 16),
                 np.linspace(-1, -1.875, 8),
                 np.linspace(-2, -3.75, 8),
                 np.linspace(-4, -7.5, 8),
                 np.linspace(-8, -15, 8),
                 np.linspace(-16, -30, 8),
                 np.linspace(-32, -56, 4)))
            betas = 10**(np.sort(betas)[::-1])
        else:
            betas = None  # use emcee default
    else:
        betas = np.logspace(0, minlogbeta, ntemps)
    if betas is not None:
        ntemps = len(betas)
    pos = p0func((ntemps, nwalkers))
    # check_init_pars (assumed to be defined elsewhere in this module)
    # sanity-checks logl/logp at the initial positions.
    if not check_init_pars(logl, logp, pos):
        return None
    ndim = pos.shape[-1]
    if threads > 1:
        pool = Pool(threads)
    else:
        pool = None
    sampler = PTSampler(ntemps,
                        nwalkers,
                        ndim,
                        logl,
                        logp,
                        betas=betas,
                        pool=pool)
    if nupdates > 0:
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        print('Steps:', end='')
        sys.stdout.flush()
        nsteps = nsamples // nupdates
        for i in range(nupdates):
            pos, lnprob, rstate = sampler.run_mcmc(pos, nsteps, thin=thin)
            print(' {}'.format((i + 1) * nsteps), end='')
            sys.stdout.flush()
    nsteps = nsamples - sampler.chain.shape[-2]
    if nsteps > 0:
        pos, lnprob, rstate = sampler.run_mcmc(pos, nsteps, thin=thin)
    if nupdates > 0:
        print('\nTime taken = {:.2f} secs'.format(time.perf_counter() - start))
    if outfilename is None:
        outfilename = 'emcee_sampler.npz'
    try:
        acor = sampler.acor
    except autocorr.AutocorrError:
        acor = None
    if saveall:
        np.savez_compressed(
            outfilename,
            acceptance_fraction=sampler.acceptance_fraction,
            acor=acor,
            beta=sampler.betas,
            chain=sampler.chain,
            lnlikelihood=sampler.lnlikelihood,
            lnprobability=sampler.lnprobability,
            tswap_acceptance_fraction=sampler.tswap_acceptance_fraction)
    else:
        # only save lowest temperature and thin samples by factor of ten
        np.savez_compressed(
            outfilename,
            acceptance_fraction=sampler.acceptance_fraction,
            acor=acor,
            beta=sampler.betas,
            chain=sampler.chain[0, :, ::10],
            lnlikelihood=sampler.lnlikelihood[0, :, ::10],
            lnprobability=sampler.lnprobability[0, :, ::10],
            tswap_acceptance_fraction=sampler.tswap_acceptance_fraction)
    return sampler
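
A hypothetical call with a toy 2-D Gaussian likelihood and a flat prior; the toy_* names and the ndim = 2 choice are mine, and check_init_pars must exist in the surrounding module:

def toy_logl(theta):
    return -0.5 * np.sum(theta**2)

def toy_logp(theta):
    return 0.0 if np.all(np.abs(theta) < 10.0) else -np.inf

def toy_p0(shape):
    # shape is (ntemps, nwalkers); append the parameter dimension
    return np.random.uniform(-1.0, 1.0, size=shape + (2,))

sampler = run_emcee(toy_logl, toy_logp, toy_p0,
                    nsamples=500, outfilename='toy_sampler.npz')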