# NOTE(review): this file has lost its newlines/indentation — many statements are
# collapsed onto single physical lines and the code is not runnable as-is.
# The line below is kept byte-identical; only this header comment is added.
# Script fragment: one AMAOpt refinement pass over `pars` (step covariance
# optCov/4, 100*len(pars)**2 samples), using `likelihood`/`logP`/`optCov`
# defined elsewhere in the file.  The last chain sample is copied into `coeff`
# and formatted as an "npars = [...]" string for pasting back into a script.
# NOTE(review): `for i in range(1)` is a single-iteration loop — presumably a
# quick toggle for repeated passes (two further passes are commented out).
# NOTE(review): `pars = coeff` rebinds the pymc parameter list to a plain numpy
# array — any later code expecting parameter objects in `pars` will break.
# NOTE(review): `keylist`/`dkeylist` are initialised here for use further on.
# use lensFit to calculate the likelihood at each point in the chain for i in range(1): S = AMAOpt(pars,[likelihood],[logP],cov=optCov/4.) S.set_minprop(len(pars)*2) S.sample(100*len(pars)**2) #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) #S.set_minprop(len(pars)*2) #S.sample(10*len(pars)**2) #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) #S.set_minprop(len(pars)*2) #S.sample(10*len(pars)**2) logp,trace,det = S.result() # log likelihoods; chain (steps * params); det['extShear PA'] = chain in this variable coeff = [] for i in range(len(pars)): coeff.append(trace[-1,i]) coeff = numpy.asarray(coeff) pars = coeff o = 'npars = [' for i in range(pars.size): o += '%f,'%(pars)[i] o = o[:-1]+"]" keylist = [] dkeylist = []
def optimize(data, niter, oname=None, first=True): import pymc, pyfits, numpy import indexTricks as iT priors = data['PRIORS'] models = data['MODELS'] pars = data['PARAMS'] image = {} for key in data['IMG'].keys(): image[key] = data['IMG'][key].copy() ZP = data['ZP'] filters = [filt for filt in data['FILTERS']] sigmas = data['SIGMA'] if 'GAIN' in data.keys(): gain = data['GAIN'] doSigma = True else: doSigma = False if 'OVRS' in data.keys(): OVRS = data['OVRS'] else: OVRS = 1 MASK = data['MASK'].copy() mask = MASK == 0 mask_r = mask.ravel() key2index = {} i = 0 for key in filters: key2index[key] = i i += 1 model2index = {} i = 0 for key in filters: for model in models[key]: model2index[model.name] = i i += 1 imshape = MASK.shape yc, xc = iT.overSample(imshape, OVRS) if doSigma == True: nu = {} eta = {} background = {} counts = {} sigmask = {} for key in filters: nu[key] = pymc.Uniform('nu_%s' % key, -6, 6, value=log10(gain[key])) eta[key] = pymc.Uniform('eta_%s' % key, -4, 5, value=1.) background[key] = sigmas[key] sigmask[key] = image[key] > 1.5 * sigmas[key]**0.5 counts[key] = image[key][sigmask[key]].copy() pars.append(nu[key]) pars.append(eta[key]) def getSigma(n=nu, e=eta, b=background, c=counts, m=mask): sigma = b.copy() sigma[m] += ((10**n) * c)**e return numpy.sqrt(sigma).ravel() sigmas = [] for key in filters: parents = { 'n': nu[key], 'e': eta[key], 'b': background[key], 'c': counts[key], 'm': sigmask[key] } sigmas.append( pymc.Deterministic(eval=getSigma, name='sigma_%s' % key, parents=parents, doc='', trace=False, verbose=False)) else: for key in filters: sigmas[key] = sigmas[key].ravel() for key in filters: image[key] = image[key].ravel() @pymc.deterministic(trace=False) def logpAndMags(p=pars): lp = 0. 
mags = [] for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] lp += linearmodelSB(p, simage, sigma[mask_r], mask, models[key], xc, yc, OVRS=OVRS) mags += [model.Mag(ZP[key]) for model in models[key]] return lp, mags @pymc.deterministic def lp(lpAM=logpAndMags): return lpAM[0] @pymc.deterministic def Mags(lpAM=logpAndMags): return lpAM[1] @pymc.observed def logpCost(value=0., logP=lp): return logP costs = [logpCost] if priors is not None: @pymc.observed def colorPrior(value=0., M=Mags): lp = 0. for p in priors: color = M[model2index[p[0]]] - M[model2index[p[1]]] lp += p[2](color) return lp costs.append(colorPrior) def resid(p): model = numpy.empty(0) for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] model = numpy.append( model, linearmodelSB(p, simage, sigma[mask_r], mask, models[key], xc, yc, levMar=True, OVRS=OVRS)) return model print "Optimizing", niter from SampleOpt import AMAOpt as Opt, levMar as levMar default = numpy.empty(0) for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] default = numpy.append(default, simage) # levMar(pars,resid,default) cov = None if 'COV' in data.keys(): cov = data['COV'] O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.sample(niter / 10) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 4. O.sample(niter / 4) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 10. O.sample(niter / 4) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 10. 
O.sample(niter) logp, trace, result = O.result() mags = numpy.array(result['Mags']) for key in model2index.keys(): result[key] = mags[:, model2index[key]].copy() del result['Mags'] output = {} for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] m = linearmodelSB([p.value for p in pars], simage, sigma[mask_r], mask, models[key], xc, yc, noResid=True, OVRS=OVRS) output[key] = m return output, (logp, trace, result)
# NOTE(review): collapsed script fragment (newlines lost); kept byte-identical,
# only this header comment is added.
# Runs a long AMAOpt chain over `pars` (step covariance optCov/2), pickles the
# result to /data/ljo31/Lens/J1125/opt<X>, then reloads the chain and resets
# every parameter to the final sample before a refit.
# NOTE(review): `optCov = None` immediately followed by `if optCov is None:`
# makes the guard always true — optCov is always rebuilt from `cov`.
# NOTE(review): depends on names defined elsewhere in the file (AMAOpt,
# likelihood, logP, cov, X, cPickle, imgs, myEmcee in the commented lines).
# NOTE(review): the trailing `for i in range(len(imgs)):` loop appears to have
# a fully commented-out body here — confirm against the original layout, since
# that would be a SyntaxError once the file is re-indented.
optCov = None if optCov is None: optCov = numpy.array(cov) #S = myEmcee.PTEmcee(pars+[likelihood],cov=optCov,nthreads=12,nwalkers=60,ntemps=4,initialPars=trace[-1]) #S.sample(20000) #S = myEmcee.Emcee(pars+[likelihood],cov=optCov,nwalkers=100,nthreads=6) # should have 100 walkers. #S.sample(1000) S = AMAOpt(pars,[likelihood],[logP],cov=optCov/2.) S.set_minprop(len(pars)*2) S.sample(1000*len(pars)**2) outFile = '/data/ljo31/Lens/J1125/opt'+str(X) f = open(outFile,'wb') cPickle.dump(S.result(),f,2) f.close() lp,trace,det = S.result() # log likelihoods; chain (steps * params); det['extShear PA'] = chain in this variable for i in range(len(pars)): pars[i].value = trace[-1,i] print "%18s %8.3f"%(pars[i].__name__,pars[i].value) ## now refit! colours = ['F555W', 'F814W'] #mods = S.blobs models = [] for i in range(len(imgs)): #mod = mods[i] #models.append(mod[a1,a2,a3])
def fastMCMC(self,niter,nburn,nthin=1): from Sampler import SimpleSample as sample from scipy import interpolate import pymc,numpy,time import ndinterp models = self.model.models data = self.data filters = data.keys() # Remove redshift dict for key in models.keys(): if type(models[key])==type({}): z = models[key].keys()[0] models[key] = models[key][z] pars = [self.priors[key] for key in self.names] ax = {} doExp = [] cube2par = [] i = 0 for key in self.model.axes_names: ax[key] = i i += 1 i = 0 for key in self.names: if key[0]=='X': continue if key.find('log')==0: pntkey = key.split('log')[1] #self.priors[key].value = numpy.log10(best[ax[pntkey]]) doExp.append(True) else: pntkey = key doExp.append(False) #self.priors[key].value = best[ax[pntkey]] cube2par.append(ax[pntkey]) doExp = numpy.array(doExp)==True par2cube = numpy.argsort(cube2par) # add stellar mass parameters pars.append(pymc.Uniform('log_Mlens',9.,12.)) pars.append(pymc.Uniform('log_Msrc',9.,12.)) M = numpy.empty(len(filters)) D = numpy.empty(len(filters)) V = numpy.empty(len(filters)) for i in range(D.size): f = filters[i] D[i] = data[f]['mag'] V[i] = data[f]['sigma']**2 @pymc.deterministic def mass_and_logp(value=0.,pars=pars): logp = 0 p = numpy.array(pars[:-2]) p[doExp] = 10**p[doExp] p = numpy.atleast_2d(p[par2cube]) mlens, msrc = pars[-2],pars[-1] for i in range(M.size): filt = filters[i] M[i] = models[filt].eval(p) if M[i]==0: return [-1.,-1e300] if len(D[i])==1: # sdss magnitude ml,ms = M[i][0] - 2.5*mlens, M[i][1] - 2.5*Msrc f = 10**(-0.4*ml) + 10**(-0.4*ms) f = -2.5*np.log10(f) logp += -0.5*(f-D[i])**2./V[i] elif len(D[i]) ==2: # HST/Keck magnitude logp += -0.5*(M[i][0] - 2.5*mlens - D[i][0])**2./V[i][0] - 0.5*(M[i][1] - 2.5*msrc - D[i][1])**2./V[i][1] return logp @pymc.observed def loglikelihood(value=0.,lp=mass_and_logp): return lp cov = [] for key in self.names: if key=='age': cov.append(0.5) elif key=='logage': cov.append(0.03) elif key=='tau': cov.append(0.1) elif key=='logtau': 
cov.append(0.03) elif key=='tau_V': cov.append(self.priors[key]['prior'].value/20.) elif key=='logtau_V': cov.append(0.1) elif key=='tauV': cov.append(self.priors[key]['prior'].value/20.) elif key=='logtauV': cov.append(0.1) elif key=='Z': cov.append(self.priors[key]['prior'].value/20.) elif key=='logZ': cov.append(0.03) elif key=='redshift': P = self.priors['redshift'] if type(P)==type(pymc.Normal('t',0.,1)): cov.append(P.parents['tau']**-0.5) elif type(P)==type(pymc.Uniform('t',0.,1.)): cov.append((P.parents['upper']-P.parents['lower'])/10.) else: cov.append(P.parents['cov']) #cov.append(0.1) cov += [0.5,0.5] # masses cov = numpy.array(cov) costs = self.constraints+[loglikelihood] from SampleOpt import Sampler,AMAOpt S = AMAOpt(pars,costs,[mass_and_logp],cov=cov) S.sample(nburn/4) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/4) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/2) logps,trace,dets = S.result() cov = numpy.cov(trace[nburn/4:].T) S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.) S.sample(nburn/2) logps,trace,dets = S.result() S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/2) logps,trace,dets = S.result() cov = numpy.cov(trace[nburn/4:].T) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(niter) logps,trace,dets = S.result() logL = dets['mass_and_logp'].T o = {'logP':logps,'logL':logL} cnt = 0 for key in self.names: o[key] = trace[:,cnt].copy() cnt += 1 return o
# NOTE(review): collapsed script fragment (newlines lost); kept byte-identical,
# only this header comment is added.
# Three-stage AMAOpt refinement over `pars` with progressively smaller step
# covariance (optCov/4 then optCov/8 twice), then evaluates lensModel.lensFit
# on the final coefficients to get residuals, the full image, and the model.
# NOTE(review): the leading `#return` looks like a disabled early-exit toggle.
# NOTE(review): `pars = coeff` rebinds the parameter list to a plain numpy
# array, as in the similar fragment earlier in this file.
# NOTE(review): the first `im = lensModel.lensFit(...)` result is immediately
# overwritten by the noResid=True call — presumably kept for its verbose
# printout only; confirm before deleting.
#return for i in range(1): S = AMAOpt(pars,[likelihood],[logP],cov=optCov/4.) S.set_minprop(len(pars)*2) S.sample(8*len(pars)**2) S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) S.set_minprop(len(pars)*2) S.sample(10*len(pars)**2) S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) S.set_minprop(len(pars)*2) S.sample(10*len(pars)**2) logp,trace,det = S.result() coeff = [] for i in range(len(pars)): coeff.append(trace[-1,i]) coeff = numpy.asarray(coeff) pars = coeff o = 'npars = [' for i in range(pars.size): o += '%f,'%(pars)[i] o = o[:-1]+"]" print o im = lensModel.lensFit(coeff,image,sigma,gals,lenses,srcs,xc,yc,OVRS,psf=psf,verbose=True) im = lensModel.lensFit(coeff,image,sigma,gals,lenses,srcs,xc,yc,OVRS,noResid=True,psf=psf,verbose=True) model = lensModel.lensFit(coeff,image,sigma,gals,lenses,srcs,xc,yc,OVRS,noResid=True,psf=psf,verbose=True,getModel=True)
# NOTE(review): newlines/indentation lost; the two physical lines below are
# kept byte-identical (the 9/10 line break falls mid-statement-suite, so this
# header is the only comment added).
# fastMCMC(self, niter, nburn, nthin=1) -> dict
#   MCMC over stellar-population parameters against ndinterp magnitude grids;
#   supports both the 'new' model format (per-filter grids in
#   self.model.models) and the old one (models.eval(p, filt, redshift)).
#   The stellar mass is solved analytically each step:
#   m = sum((M-D)/V) / sum(2.5/V), with logp the resulting chi^2/-2.
#   Runs a fixed AMAOpt/Sampler burn-in schedule, re-estimating the proposal
#   covariance from the chain, then a final `niter` run.
#   Returns {'logP', 'logL', 'logmass', <each name in self.names>}.
#   NOTE(review): `nthin` is accepted but never used.
#   NOTE(review): Python 2 only (data.keys() indexed as a list, nburn/4
#   integer division).
def fastMCMC(self,niter,nburn,nthin=1): from Sampler import SimpleSample as sample from scipy import interpolate import pymc,numpy,time import ndinterp if self.format=='new': models = self.model.models else: models = self.model data = self.data filters = data.keys() pars = [self.priors[key] for key in self.names] ax = {} doExp = [] cube2par = [] i = 0 for key in self.model.axes_names: ax[key] = i i += 1 i = 0 for key in self.names: if key[0]=='X': continue if key.find('log')==0: pntkey = key.split('log')[1] #self.priors[key].value = numpy.log10(best[ax[pntkey]]) doExp.append(True) else: pntkey = key doExp.append(False) #self.priors[key].value = best[ax[pntkey]] cube2par.append(ax[pntkey]) doExp = numpy.array(doExp)==True par2cube = numpy.argsort(cube2par) M = numpy.empty(len(filters)) D = numpy.empty(len(filters)) V = numpy.empty(len(filters)) for i in range(D.size): f = filters[i] D[i] = data[f]['mag'] V[i] = data[f]['sigma']**2 @pymc.deterministic def mass_and_logp(value=0.,pars=pars): p = numpy.array(pars) p[doExp] = 10**p[doExp] p = numpy.atleast_2d(p[par2cube]) for i in range(M.size): filt = filters[i] if self.format=='new': M[i] = models[filt].eval(p) else: M[i] = models.eval(p,filt,data[filt]['redshift']) if M[i]==0: return [-1.,-1e300] m = ((M-D)/V).sum()/(2.5/V).sum() logp = -0.5*((M-2.5*m-D)**2/V).sum() return [m,logp] @pymc.observed def loglikelihood(value=0.,lp=mass_and_logp): return lp[1] cov = [] for key in self.names: if key=='age': cov.append(0.5) elif key=='logage': cov.append(0.03) elif key=='tau': cov.append(0.1) elif key=='logtau': cov.append(0.03) elif key=='tau_V': cov.append(self.priors[key]['prior'].value/20.) elif key=='logtau_V': cov.append(0.1) elif key=='Z': cov.append(self.priors[key]['prior'].value/20.) 
elif key=='logZ': cov.append(0.03) elif key=='redshift': P = self.priors['redshift'] if type(P)==type(pymc.Normal('t',0.,1)): cov.append(P.parents['tau']**-0.5) elif type(P)==type(pymc.Uniform('t',0.,1.)): cov.append((P.parents['upper']-P.parents['lower'])/10.) else: cov.append(P.parents['cov']) #cov.append(0.1) cov = numpy.array(cov) costs = self.constraints+[loglikelihood] from SampleOpt import Sampler,AMAOpt S = AMAOpt(pars,costs,[mass_and_logp],cov=cov) S.sample(nburn/4) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/4) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/2) logps,trace,dets = S.result() cov = numpy.cov(trace[nburn/4:].T) S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.) S.sample(nburn/2) logps,trace,dets = S.result() S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(nburn/2) logps,trace,dets = S.result() cov = numpy.cov(trace[nburn/4:].T) S = Sampler(pars,costs,[mass_and_logp]) S.setCov(cov) S.sample(niter) logps,trace,dets = S.result() mass,logL = dets['mass_and_logp'].T o = {'logP':logps,'logL':logL,'logmass':mass} cnt = 0 for key in self.names: o[key] = trace[:,cnt].copy() cnt += 1 return o
# NOTE(review): newlines/indentation lost; the three physical lines below are
# kept byte-identical.  The 11/12 break falls inside a commented-out region and
# the 12/13 break inside a triple-quoted string, so this header is the only
# comment added.
# fastMCMC(self, niter, nburn, nthin=1) -> dict
#   Grid version: evaluates the analytic-mass log-posterior over the full
#   model grid, builds linear ndinterp interpolators for logp and mass, and
#   estimates the MCMC proposal covariance from the gridded posterior (the
#   weighted second moments, reordered from cube axes to parameter order via
#   cube2par).  Then samples with AMAOpt/Sampler over the interpolators and
#   returns {'logP', 'logL', 'logmass', <each name in self.names>}.
#   NOTE(review): `nthin` is unused; Python 2 only (print statements).
#   NOTE(review): everything after `return o` is unreachable debug scribble.
#   Within it, `logp -= max` subtracts the BUILTIN `max` function (surely
#   meant `logp -= logp.max()`), and `self.models.axes` is presumably
#   `self.model.axes` — harmless only because the code is dead.
#   NOTE(review): the keyword-based `cov` list is inside the triple-quoted
#   string; the `cov` the samplers actually use is the grid-derived matrix.
def fastMCMC(self,niter,nburn,nthin=1): from Sampler import SimpleSample as sample from scipy import interpolate import pymc,numpy,time import ndinterp models = self.model.models data = self.data filters = data.keys() t = time.time() T1 = models[filters[0]]*0. T2 = 0. for f in filters: T1 += (models[f]-data[f]['mag'])/data[f]['sigma']**2 T2 += 2.5/self.data[f]['sigma']**2 M = T1/T2 logp = 0. for f in filters: logp += -0.5*(-2.5*M+models[f]-data[f]['mag'])**2/data[f]['sigma']**2 t = time.time() axes = {} i = 0 ax = {} ind = numpy.unravel_index(logp.argmax(),logp.shape) best = [] for key in self.model.axes_names: a = self.model.axes[key]['points'] axes[i] = interpolate.splrep(a,numpy.arange(a.size),k=1,s=0) ax[key] = i best.append(a[ind[i]]) i += 1 print logp.max() logpmodel = ndinterp.ndInterp(axes,logp,order=1) massmodel = ndinterp.ndInterp(axes,M,order=1) pars = [self.priors[key] for key in self.names] doExp = [] cube2par = [] i = 0 for key in self.names: if key.find('log')==0: pntkey = key.split('log')[1] #self.priors[key].value = numpy.log10(best[ax[pntkey]]) doExp.append(True) else: pntkey = key doExp.append(False) #self.priors[key].value = best[ax[pntkey]] cube2par.append(ax[pntkey]) doExp = numpy.array(doExp)==True par2cube = numpy.argsort(cube2par) logp -= logp.max() p = numpy.exp(logp) p /= p.sum() i = 0 wmean = numpy.empty(p.ndim) axarr = [] for key in self.model.axes_names: a = self.model.axes[key]['points'] p0 = numpy.rollaxis(p,i,p.ndim) wmean[i] = (a*p0).sum() axarr.append(numpy.rollaxis(a+p0*0,p.ndim-1,i)) i += 1 cov = numpy.empty((p.ndim,p.ndim)) #for i in range(p.ndim): # for j in range(i,p.ndim): # cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum() # cov[j,i] = cov[i,j] for i in range(p.ndim): k = cube2par[i] for j in range(i,p.ndim): l = cube2par[j] cov[i,j] = (p*(axarr[k]-wmean[k])*(axarr[l]-wmean[l])).sum() cov[j,i] = cov[i,j] cov /= 1.-(p**2).sum() #for key in self.names: # if key.find('log')==0: # pntkey = key.split('log')[1] # 
self.priors[key].value = numpy.log10(wmean[ax[pntkey]]) # else: # self.priors[key].value = wmean[ax[key]] #self.priors['redshift'].value = 0.1 pnt = numpy.empty((len(self.priors),1)) @pymc.deterministic def mass_and_logp(value=0.,pars=pars): p = numpy.array(pars) p[doExp] = 10**p[doExp] p = numpy.atleast_2d(p[par2cube]) mass = massmodel.eval(p) if mass==0.: return [0.,-1e200] logp = logpmodel.eval(p) return [mass,logp] @pymc.observed def loglikelihood(value=0.,lp=mass_and_logp): return lp[1] """ logp -= logp.max() p = numpy.exp(logp) p /= p.sum() i = 0 wmean = numpy.empty(p.ndim) for key in self.model.axes_names: a = self.model.axes[key]['points'] p0 = numpy.rollaxis(p,i,p.ndim) wmean[i] = (a*p0).sum() i += 1 cov = [] for key in self.names: if key=='age': cov.append(0.5) elif key=='logage': cov.append(0.03) elif key=='tau': cov.append(0.1) elif key=='logtau': cov.append(0.03) elif key=='tau_V': cov.append(self.priors[key]['prior'].value/20.) elif key=='logtau_V': cov.append(0.1) elif key=='Z': cov.append(self.priors[key]['prior'].value/20.) 
elif key=='logZ': cov.append(0.03) elif key=='redshift': cov.append(0.1) cov = numpy.array(cov) """ from SampleOpt import Sampler,AMAOpt S = AMAOpt(pars,[loglikelihood],[mass_and_logp],cov=cov) S.sample(nburn) logps,trace,dets = S.result() print logps.max() S = Sampler(pars,[loglikelihood],[mass_and_logp]) S.setCov(cov) S.sample(nburn/2) logps,trace,dets = S.result() cov = numpy.cov(trace.T) S = Sampler(pars,[loglikelihood],[mass_and_logp]) S.setCov(cov) S.sample(niter) logps,trace,dets = S.result() mass,logL = dets['mass_and_logp'][:,:,0].T o = {'logP':logps,'logL':logL,'logmass':mass} cnt = 0 for key in self.names: o[key] = trace[:,cnt].copy() cnt += 1 return o arg = logp.argmax() logp -= logp.max() p = numpy.exp(logp) p /= p.sum() print p.max() i = 0 for key in self.model.axes_names: a = self.model.axes[key]['points'] if key=='redshift': a = a[::5] p0 = numpy.rollaxis(p,i,p.ndim) print key,(a*p0).sum() i += 1 print numpy.unravel_index(arg,logp.shape) logp -= max print (M*numpy.exp(logp)).sum()/numpy.exp(logp).sum() z = (M*0.+1)*self.model.axes['redshift']['points'][::5] print (z*numpy.exp(logp)).sum()/numpy.exp(logp).sum() f = open('check','wb') import cPickle cPickle.dump([M,logp],f,2) f.close() mod = ndinterp.ndInterp(self.models.axes,logp)
# NOTE(review): this is a space-normalized DUPLICATE of the fastMCMC grid
# version directly above — consider deleting one copy once the file's layout
# is restored.  Newlines/indentation are lost; the three physical lines below
# are kept byte-identical.  The 14/15 break falls MID-EXPRESSION
# (`cov /= 1.` / `- (p**2).sum()`) and the 15/16 break inside a triple-quoted
# string, confirming the line breaks here are mangling artifacts; this header
# is the only comment added.
# fastMCMC(self, niter, nburn, nthin=1) -> dict
#   Evaluates the analytic-mass log-posterior on the full model grid, builds
#   linear ndinterp interpolators for logp and mass, derives the proposal
#   covariance from the gridded posterior, then runs AMAOpt/Sampler chains.
#   Returns {'logP', 'logL', 'logmass', <each name in self.names>}.
#   NOTE(review): `nthin` is unused; Python 2 only (print statements).
#   NOTE(review): code after `return o` is unreachable debug scribble; within
#   it `logp -= max` subtracts the builtin `max` function (surely meant
#   `logp.max()`) and `self.models.axes` is presumably `self.model.axes`.
def fastMCMC(self, niter, nburn, nthin=1): from Sampler import SimpleSample as sample from scipy import interpolate import pymc, numpy, time import ndinterp models = self.model.models data = self.data filters = data.keys() t = time.time() T1 = models[filters[0]] * 0. T2 = 0. for f in filters: T1 += (models[f] - data[f]['mag']) / data[f]['sigma']**2 T2 += 2.5 / self.data[f]['sigma']**2 M = T1 / T2 logp = 0. for f in filters: logp += -0.5 * (-2.5 * M + models[f] - data[f]['mag'])**2 / data[f]['sigma']**2 t = time.time() axes = {} i = 0 ax = {} ind = numpy.unravel_index(logp.argmax(), logp.shape) best = [] for key in self.model.axes_names: a = self.model.axes[key]['points'] axes[i] = interpolate.splrep(a, numpy.arange(a.size), k=1, s=0) ax[key] = i best.append(a[ind[i]]) i += 1 print logp.max() logpmodel = ndinterp.ndInterp(axes, logp, order=1) massmodel = ndinterp.ndInterp(axes, M, order=1) pars = [self.priors[key] for key in self.names] doExp = [] cube2par = [] i = 0 for key in self.names: if key.find('log') == 0: pntkey = key.split('log')[1] #self.priors[key].value = numpy.log10(best[ax[pntkey]]) doExp.append(True) else: pntkey = key doExp.append(False) #self.priors[key].value = best[ax[pntkey]] cube2par.append(ax[pntkey]) doExp = numpy.array(doExp) == True par2cube = numpy.argsort(cube2par) logp -= logp.max() p = numpy.exp(logp) p /= p.sum() i = 0 wmean = numpy.empty(p.ndim) axarr = [] for key in self.model.axes_names: a = self.model.axes[key]['points'] p0 = numpy.rollaxis(p, i, p.ndim) wmean[i] = (a * p0).sum() axarr.append(numpy.rollaxis(a + p0 * 0, p.ndim - 1, i)) i += 1 cov = numpy.empty((p.ndim, p.ndim)) #for i in range(p.ndim): # for j in range(i,p.ndim): # cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum() # cov[j,i] = cov[i,j] for i in range(p.ndim): k = cube2par[i] for j in range(i, p.ndim): l = cube2par[j] cov[i, j] = (p * (axarr[k] - wmean[k]) * (axarr[l] - wmean[l])).sum() cov[j, i] = cov[i, j] cov /= 1. 
- (p**2).sum() #for key in self.names: # if key.find('log')==0: # pntkey = key.split('log')[1] # self.priors[key].value = numpy.log10(wmean[ax[pntkey]]) # else: # self.priors[key].value = wmean[ax[key]] #self.priors['redshift'].value = 0.1 pnt = numpy.empty((len(self.priors), 1)) @pymc.deterministic def mass_and_logp(value=0., pars=pars): p = numpy.array(pars) p[doExp] = 10**p[doExp] p = numpy.atleast_2d(p[par2cube]) mass = massmodel.eval(p) if mass == 0.: return [0., -1e200] logp = logpmodel.eval(p) return [mass, logp] @pymc.observed def loglikelihood(value=0., lp=mass_and_logp): return lp[1] """ logp -= logp.max() p = numpy.exp(logp) p /= p.sum() i = 0 wmean = numpy.empty(p.ndim) for key in self.model.axes_names: a = self.model.axes[key]['points'] p0 = numpy.rollaxis(p,i,p.ndim) wmean[i] = (a*p0).sum() i += 1 cov = [] for key in self.names: if key=='age': cov.append(0.5) elif key=='logage': cov.append(0.03) elif key=='tau': cov.append(0.1) elif key=='logtau': cov.append(0.03) elif key=='tau_V': cov.append(self.priors[key]['prior'].value/20.) elif key=='logtau_V': cov.append(0.1) elif key=='Z': cov.append(self.priors[key]['prior'].value/20.) 
elif key=='logZ': cov.append(0.03) elif key=='redshift': cov.append(0.1) cov = numpy.array(cov) """ from SampleOpt import Sampler, AMAOpt S = AMAOpt(pars, [loglikelihood], [mass_and_logp], cov=cov) S.sample(nburn) logps, trace, dets = S.result() print logps.max() S = Sampler(pars, [loglikelihood], [mass_and_logp]) S.setCov(cov) S.sample(nburn / 2) logps, trace, dets = S.result() cov = numpy.cov(trace.T) S = Sampler(pars, [loglikelihood], [mass_and_logp]) S.setCov(cov) S.sample(niter) logps, trace, dets = S.result() mass, logL = dets['mass_and_logp'][:, :, 0].T o = {'logP': logps, 'logL': logL, 'logmass': mass} cnt = 0 for key in self.names: o[key] = trace[:, cnt].copy() cnt += 1 return o arg = logp.argmax() logp -= logp.max() p = numpy.exp(logp) p /= p.sum() print p.max() i = 0 for key in self.model.axes_names: a = self.model.axes[key]['points'] if key == 'redshift': a = a[::5] p0 = numpy.rollaxis(p, i, p.ndim) print key, (a * p0).sum() i += 1 print numpy.unravel_index(arg, logp.shape) logp -= max print(M * numpy.exp(logp)).sum() / numpy.exp(logp).sum() z = (M * 0. + 1) * self.model.axes['redshift']['points'][::5] print(z * numpy.exp(logp)).sum() / numpy.exp(logp).sum() f = open('check', 'wb') import cPickle cPickle.dump([M, logp], f, 2) f.close() mod = ndinterp.ndInterp(self.models.axes, logp)