def optimize(data, niter, oname=None, first=True): import pymc, pyfits, numpy import indexTricks as iT priors = data['PRIORS'] models = data['MODELS'] pars = data['PARAMS'] image = {} for key in data['IMG'].keys(): image[key] = data['IMG'][key].copy() ZP = data['ZP'] filters = [filt for filt in data['FILTERS']] sigmas = data['SIGMA'] if 'GAIN' in data.keys(): gain = data['GAIN'] doSigma = True else: doSigma = False if 'OVRS' in data.keys(): OVRS = data['OVRS'] else: OVRS = 1 MASK = data['MASK'].copy() mask = MASK == 0 mask_r = mask.ravel() key2index = {} i = 0 for key in filters: key2index[key] = i i += 1 model2index = {} i = 0 for key in filters: for model in models[key]: model2index[model.name] = i i += 1 imshape = MASK.shape yc, xc = iT.overSample(imshape, OVRS) if doSigma == True: nu = {} eta = {} background = {} counts = {} sigmask = {} for key in filters: nu[key] = pymc.Uniform('nu_%s' % key, -6, 6, value=log10(gain[key])) eta[key] = pymc.Uniform('eta_%s' % key, -4, 5, value=1.) background[key] = sigmas[key] sigmask[key] = image[key] > 1.5 * sigmas[key]**0.5 counts[key] = image[key][sigmask[key]].copy() pars.append(nu[key]) pars.append(eta[key]) def getSigma(n=nu, e=eta, b=background, c=counts, m=mask): sigma = b.copy() sigma[m] += ((10**n) * c)**e return numpy.sqrt(sigma).ravel() sigmas = [] for key in filters: parents = { 'n': nu[key], 'e': eta[key], 'b': background[key], 'c': counts[key], 'm': sigmask[key] } sigmas.append( pymc.Deterministic(eval=getSigma, name='sigma_%s' % key, parents=parents, doc='', trace=False, verbose=False)) else: for key in filters: sigmas[key] = sigmas[key].ravel() for key in filters: image[key] = image[key].ravel() @pymc.deterministic(trace=False) def logpAndMags(p=pars): lp = 0. 
mags = [] for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] lp += linearmodelSB(p, simage, sigma[mask_r], mask, models[key], xc, yc, OVRS=OVRS) mags += [model.Mag(ZP[key]) for model in models[key]] return lp, mags @pymc.deterministic def lp(lpAM=logpAndMags): return lpAM[0] @pymc.deterministic def Mags(lpAM=logpAndMags): return lpAM[1] @pymc.observed def logpCost(value=0., logP=lp): return logP costs = [logpCost] if priors is not None: @pymc.observed def colorPrior(value=0., M=Mags): lp = 0. for p in priors: color = M[model2index[p[0]]] - M[model2index[p[1]]] lp += p[2](color) return lp costs.append(colorPrior) def resid(p): model = numpy.empty(0) for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] model = numpy.append( model, linearmodelSB(p, simage, sigma[mask_r], mask, models[key], xc, yc, levMar=True, OVRS=OVRS)) return model print "Optimizing", niter from SampleOpt import AMAOpt as Opt, levMar as levMar default = numpy.empty(0) for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] default = numpy.append(default, simage) # levMar(pars,resid,default) cov = None if 'COV' in data.keys(): cov = data['COV'] O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.sample(niter / 10) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 4. O.sample(niter / 4) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 10. O.sample(niter / 4) O = Opt(pars, costs, [lp, Mags], cov=cov) O.set_minprop(len(pars) * 2) O.cov = O.cov / 10. 
O.sample(niter) logp, trace, result = O.result() mags = numpy.array(result['Mags']) for key in model2index.keys(): result[key] = mags[:, model2index[key]].copy() del result['Mags'] output = {} for key in filters: indx = key2index[key] if doSigma == True: sigma = sigmas[indx].value else: sigma = sigmas[key] simage = (image[key] / sigma)[mask_r] m = linearmodelSB([p.value for p in pars], simage, sigma[mask_r], mask, models[key], xc, yc, noResid=True, OVRS=OVRS) output[key] = m return output, (logp, trace, result)
def resid(p): lp = -2*logP.value return self.imgs[0].ravel()*0 + lp optCov = None if optCov is None: optCov = numpy.array(cov) #S = levMar(pars,resid) #self.outPars = pars #return # use lensFit to calculate the likelihood at each point in the chain for i in range(1): S = AMAOpt(pars,[likelihood],[logP],cov=optCov/4.) S.set_minprop(len(pars)*2) S.sample(100*len(pars)**2) #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) #S.set_minprop(len(pars)*2) #S.sample(10*len(pars)**2) #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.) #S.set_minprop(len(pars)*2) #S.sample(10*len(pars)**2) logp,trace,det = S.result() # log likelihoods; chain (steps * params); det['extShear PA'] = chain in this variable coeff = [] for i in range(len(pars)):
def runInference(self,optCov=None,getModel=False):
    """Assemble the lens/galaxy/source model from the managers and sample it.

    Collects parameters and proposal covariances from the parent's galaxy,
    lens and source managers (plus an optional external shear), then either
    returns the noise-free model images (getModel=True or no free
    parameters) or runs an AMAOpt sampling pass and returns getModel().

    optCov  -- optional proposal covariance; if None, built from the
               accumulated per-object covariances.
    getModel -- if True, skip sampling and return the model images.
    Returns None when there are no galaxies or sources to fit.
    """
    import numpy
    from SampleOpt import AMAOpt,levMar
    # Start from the frame-offset parameters/covariances; per-object
    # parameters are appended below.
    cov = [c for c in self.cov]
    pars = [o for o in self.offsets]
    gals = []
    srcs = []
    lenses = []
    self.gals = self.parent.galaxyManager.objs
    self.lenses = self.parent.lensManager.objs
    self.srcs = self.parent.srcManager.objs
    mask = self.parent.mask
    # Nothing to fit without at least one galaxy or source component
    if len(self.gals)+len(self.srcs)==0:
        return None
#    if len(self.gals)+len(self.srcs)+len(self.lenses)==0:
#        return None
    # Each manager object builds its model and exposes (model, pars, cov)
    for g in self.gals.keys():
        gal = self.gals[g]
        gal.makeModel()
        gal,gpars,gcov = gal.model,gal.modelPars,gal.cov
        gals.append(gal)
        pars += gpars
        cov += gcov
    for s in self.srcs.keys():
        src = self.srcs[s]
        src.makeModel()
        src,spars,scov = src.model,src.modelPars,src.cov
        srcs.append(src)
        pars += spars
        cov += scov
    for l in self.lenses.keys():
        lens = self.lenses[l]
        lens.makeModel()
        lens,lpars,lcov = lens.model,lens.modelPars,lens.cov
        lenses.append(lens)
        pars += lpars
        cov += lcov
    # External shear is treated as an extra lens component when enabled
    if self.parent.shearFlag==True:
        shear = self.parent.shear
        shear.makeModel()
        lenses.append(shear.model)
        pars += shear.modelPars
        cov += shear.cov
    if getModel==True or len(pars)==0:
        if len(pars)==0:
            self.outPars = []
        # Evaluate the model for each image without sampling.
        # Frame 0 is the reference; later frames get (x0, y0) offsets
        # taken from the first entries of `pars`.
        models = []
        for i in range(len(self.imgs)):
            if i==0:
                x0 = 0.
                y0 = 0.
            else:
                x0 = pars[i*2-2].value
                y0 = pars[i*2-1].value
            print 'x0,y0',x0,y0
            img = self.imgs[i]
            sig = self.sigs[i]
            psf = self.psfs[i]
            xc = self.xc[i]
            yc = self.yc[i]
            model = lensModel.lensFit(None,img,sig,gals,lenses,srcs,xc+x0,
                                      yc+y0,1,verbose=False,psf=psf,
                                      noResid=True,csub=1)
            models.append(model)
        return models
    # Trim images for faster convolution if masking
    xc = []
    yc = []
    imgs = []
    sigs = []
    psfs = []
    if mask is not None:
        # Crop everything to the bounding box of the mask's True pixels
        Y,X = numpy.where(mask)
        ylo,yhi,xlo,xhi = Y.min(),Y.max()+1,X.min(),X.max()+1
        mask = mask[ylo:yhi,xlo:xhi]
        for i in range(len(self.imgs)):
            xc.append(self.xc[i][ylo:yhi,xlo:xhi].copy())
            yc.append(self.yc[i][ylo:yhi,xlo:xhi].copy())
            imgs.append(self.imgs[i][ylo:yhi,xlo:xhi].copy())
            sigs.append(self.sigs[i][ylo:yhi,xlo:xhi].copy())
            if self.psfs[i] is not None:
                PSF = self.psfImgs[i]
                psfs.append(convolve.convolve(imgs[-1],PSF)[1])
            # NOTE(review): when self.psfs[i] is None nothing is appended
            # to `psfs`, so psfs[i] can fall out of step with imgs[i] in
            # mixed PSF/no-PSF inputs — confirm whether that case occurs.
    else:
        # No mask: use shallow copies of the full-frame arrays
        xc = [i for i in self.xc]
        yc = [i for i in self.yc]
        imgs = [i for i in self.imgs]
        sigs = [i for i in self.sigs]
        psfs = [i for i in self.psfs]

    @pymc.deterministic
    def logP(value=0.,p=pars):
        # Sum the lensFit log-likelihood over all (possibly trimmed) images,
        # applying the per-frame offsets as in the getModel branch above.
        lp = 0.
        for i in range(len(imgs)):
            if i==0:
                x0 = 0.
                y0 = 0.
            else:
                x0 = pars[i*2-2].value
                y0 = pars[i*2-1].value
            img = imgs[i]
            sig = sigs[i]
            psf = psfs[i]
            lp += lensModel.lensFit(None,img,sig,gals,lenses,srcs,xc[i]+x0,
                                    yc[i]+y0,1,verbose=False,psf=psf,
                                    mask=mask,csub=1)
        return lp

    @pymc.observed
    def likelihood(value=0.,lp=logP):
        return lp

    def resid(p):
        # Residual stub for the (disabled) levMar path: constant vector
        # scaled by -2*logP.
        lp = -2*logP.value
        return self.imgs[0].ravel()*0 + lp

    if optCov is None:
        optCov = numpy.array(cov)
    #S = levMar(pars,resid)
    #self.outPars = pars
    #return
    # Sample length scales with the square of the parameter count,
    # with a floor of 20 steps.
    niter = 2*len(pars)**2
    if niter<20:
        niter = 20
    S = AMAOpt(pars,[likelihood],[logP],cov=optCov)
    S.set_minprop(10*len(pars))
    S.sample(niter)
    self.Sampler = S
    self.outPars = pars
    return self.getModel()