def resid(p):
    """levMar-style residual stub: a flat zero image shifted by -2*logP.

    NOTE(review): fragment — relies on `logP` and `self` from the
    surrounding (not visible) scope; the parameter vector `p` is unused.
    """
    chi2 = -2 * logP.value
    flat = self.imgs[0].ravel()
    return flat * 0 + chi2

# Script fragment: run the AMAOpt adaptive-Metropolis optimizer over `pars`.
# Default the proposal covariance to the per-parameter widths gathered above.
optCov = None
if optCov is None:
    optCov = numpy.array(cov)


#S = levMar(pars,resid)
#self.outPars = pars
#return
# use lensFit to calculate the likelihood at each point in the chain
# Single-pass optimization loop (the extra refinement stages are disabled).
for i in range(1):
    S = AMAOpt(pars,[likelihood],[logP],cov=optCov/4.)
    S.set_minprop(len(pars)*2)
    # number of samples scales quadratically with the parameter count
    S.sample(100*len(pars)**2)

    #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.)
    #S.set_minprop(len(pars)*2)
    #S.sample(10*len(pars)**2)

    #S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.)
    #S.set_minprop(len(pars)*2)
    #S.sample(10*len(pars)**2)


logp,trace,det = S.result() # log likelihoods; chain (steps * params); det['extShear PA'] = chain in this variable

coeff = []
# ---- Example #2 (scraped-snippet separator; original score: 0) ----
    def runInference(self,optCov=None,getModel=False):
        """Optimize the lens/galaxy/source model over all images with AMAOpt.

        Gathers free parameters and proposal widths from the galaxy, lens and
        source managers (plus optional external shear), builds a pymc
        log-posterior over the (optionally mask-trimmed) images, and samples
        it with the adaptive-Metropolis optimizer.

        optCov   -- optional proposal covariance; defaults to the collected
                    per-parameter covariance values.
        getModel -- if True, skip sampling and return the model images for
                    the current parameter values.
        """
        import numpy
        from SampleOpt import AMAOpt,levMar
        cov = [c for c in self.cov]
        pars = [o for o in self.offsets]
        gals = []
        srcs = []
        lenses = []

        self.gals = self.parent.galaxyManager.objs
        self.lenses = self.parent.lensManager.objs
        self.srcs = self.parent.srcManager.objs
        mask = self.parent.mask

        # Nothing to fit without at least one galaxy or source component.
        if len(self.gals)+len(self.srcs)==0:
            return None
#        if len(self.gals)+len(self.srcs)+len(self.lenses)==0:
#            return None
        # Collect each component's model object, free parameters, and
        # proposal widths into flat lists.
        for g in self.gals.keys():
            gal = self.gals[g]
            gal.makeModel()
            gal,gpars,gcov = gal.model,gal.modelPars,gal.cov
            gals.append(gal)
            pars += gpars
            cov += gcov
        for s in self.srcs.keys():
            src = self.srcs[s]
            src.makeModel()
            src,spars,scov = src.model,src.modelPars,src.cov
            srcs.append(src)
            pars += spars
            cov += scov
        for l in self.lenses.keys():
            lens = self.lenses[l]
            lens.makeModel()
            lens,lpars,lcov = lens.model,lens.modelPars,lens.cov
            lenses.append(lens)
            pars += lpars
            cov += lcov
        if self.parent.shearFlag==True:
            shear = self.parent.shear
            shear.makeModel()
            lenses.append(shear.model)
            pars += shear.modelPars
            cov += shear.cov

        # Model-evaluation shortcut: no sampling, just render each image.
        if getModel==True or len(pars)==0:
            if len(pars)==0:
                self.outPars = []
            models = []
            for i in range(len(self.imgs)):
                # Image 0 defines the reference frame; later images carry
                # two positional-offset parameters at the head of `pars`.
                if i==0:
                    x0 = 0.
                    y0 = 0.
                else:
                    x0 = pars[i*2-2].value
                    y0 = pars[i*2-1].value
                    print 'x0,y0',x0,y0
                img = self.imgs[i]
                sig = self.sigs[i]
                psf = self.psfs[i]
                xc = self.xc[i]
                yc = self.yc[i]
                model = lensModel.lensFit(None,img,sig,gals,lenses,srcs,xc+x0,
                                        yc+y0,1,verbose=False,psf=psf,
                                        noResid=True,csub=1)
                models.append(model)
            return models

        # Trim images for faster convolution if masking
        xc = []
        yc = []
        imgs = []
        sigs = []
        psfs = []
        if mask is not None:
            # Crop all planes to the bounding box of the mask's True pixels.
            Y,X = numpy.where(mask)
            ylo,yhi,xlo,xhi = Y.min(),Y.max()+1,X.min(),X.max()+1
            mask = mask[ylo:yhi,xlo:xhi]
            for i in range(len(self.imgs)):
                xc.append(self.xc[i][ylo:yhi,xlo:xhi].copy())
                yc.append(self.yc[i][ylo:yhi,xlo:xhi].copy())
                imgs.append(self.imgs[i][ylo:yhi,xlo:xhi].copy())
                sigs.append(self.sigs[i][ylo:yhi,xlo:xhi].copy())
                if self.psfs[i] is not None:
                    PSF = self.psfImgs[i]
                    psfs.append(convolve.convolve(imgs[-1],PSF)[1])
        else:
            xc = [i for i in self.xc]
            yc = [i for i in self.yc]
            imgs = [i for i in self.imgs]
            sigs = [i for i in self.sigs]
            psfs = [i for i in self.psfs]

        # Total log-probability: sum of per-image lensFit likelihoods.
        # NOTE(review): the closure reads `pars[...]` directly rather than
        # its `p` argument; this works because `pars` holds pymc variables.
        @pymc.deterministic
        def logP(value=0.,p=pars):
            lp = 0.
            for i in range(len(imgs)):
                if i==0:
                    x0 = 0.
                    y0 = 0.
                else:
                    x0 = pars[i*2-2].value
                    y0 = pars[i*2-1].value
                img = imgs[i]
                sig = sigs[i]
                psf = psfs[i]
                lp += lensModel.lensFit(None,img,sig,gals,lenses,srcs,xc[i]+x0,
                                        yc[i]+y0,1,verbose=False,psf=psf,
                                        mask=mask,csub=1)
            return lp

        # Observed stochastic wrapping logP so AMAOpt can use it as a cost.
        @pymc.observed
        def likelihood(value=0.,lp=logP):
            return lp

        # levMar residual stub (unused; levMar call below is commented out).
        def resid(p):
            lp = -2*logP.value
            return self.imgs[0].ravel()*0 + lp

        if optCov is None:
            optCov = numpy.array(cov)

        #S = levMar(pars,resid)
        #self.outPars = pars
        #return
        # Sample count grows quadratically with parameter count, floor of 20.
        niter = 2*len(pars)**2
        if niter<20:
            niter = 20
        S = AMAOpt(pars,[likelihood],[logP],cov=optCov)
        S.set_minprop(10*len(pars))
        S.sample(niter)
        self.Sampler = S
        self.outPars = pars
        return self.getModel()
# ---- Example #3 (scraped-snippet separator; original score: 0) ----
def optimize(data, niter, oname=None, first=True):
    import pymc, pyfits, numpy
    import indexTricks as iT

    priors = data['PRIORS']
    models = data['MODELS']
    pars = data['PARAMS']

    image = {}
    for key in data['IMG'].keys():
        image[key] = data['IMG'][key].copy()
    ZP = data['ZP']
    filters = [filt for filt in data['FILTERS']]

    sigmas = data['SIGMA']
    if 'GAIN' in data.keys():
        gain = data['GAIN']
        doSigma = True
    else:
        doSigma = False

    if 'OVRS' in data.keys():
        OVRS = data['OVRS']
    else:
        OVRS = 1

    MASK = data['MASK'].copy()
    mask = MASK == 0
    mask_r = mask.ravel()

    key2index = {}
    i = 0
    for key in filters:
        key2index[key] = i
        i += 1

    model2index = {}
    i = 0
    for key in filters:
        for model in models[key]:
            model2index[model.name] = i
            i += 1

    imshape = MASK.shape
    yc, xc = iT.overSample(imshape, OVRS)

    if doSigma == True:
        nu = {}
        eta = {}
        background = {}
        counts = {}
        sigmask = {}
        for key in filters:
            nu[key] = pymc.Uniform('nu_%s' % key,
                                   -6,
                                   6,
                                   value=log10(gain[key]))
            eta[key] = pymc.Uniform('eta_%s' % key, -4, 5, value=1.)
            background[key] = sigmas[key]
            sigmask[key] = image[key] > 1.5 * sigmas[key]**0.5
            counts[key] = image[key][sigmask[key]].copy()
            pars.append(nu[key])
            pars.append(eta[key])

        def getSigma(n=nu, e=eta, b=background, c=counts, m=mask):
            sigma = b.copy()
            sigma[m] += ((10**n) * c)**e
            return numpy.sqrt(sigma).ravel()

        sigmas = []
        for key in filters:
            parents = {
                'n': nu[key],
                'e': eta[key],
                'b': background[key],
                'c': counts[key],
                'm': sigmask[key]
            }
            sigmas.append(
                pymc.Deterministic(eval=getSigma,
                                   name='sigma_%s' % key,
                                   parents=parents,
                                   doc='',
                                   trace=False,
                                   verbose=False))
    else:
        for key in filters:
            sigmas[key] = sigmas[key].ravel()

    for key in filters:
        image[key] = image[key].ravel()

    @pymc.deterministic(trace=False)
    def logpAndMags(p=pars):
        lp = 0.
        mags = []
        for key in filters:
            indx = key2index[key]
            if doSigma == True:
                sigma = sigmas[indx].value
            else:
                sigma = sigmas[key]
            simage = (image[key] / sigma)[mask_r]
            lp += linearmodelSB(p,
                                simage,
                                sigma[mask_r],
                                mask,
                                models[key],
                                xc,
                                yc,
                                OVRS=OVRS)
            mags += [model.Mag(ZP[key]) for model in models[key]]
        return lp, mags

    @pymc.deterministic
    def lp(lpAM=logpAndMags):
        return lpAM[0]

    @pymc.deterministic
    def Mags(lpAM=logpAndMags):
        return lpAM[1]

    @pymc.observed
    def logpCost(value=0., logP=lp):
        return logP

    costs = [logpCost]
    if priors is not None:

        @pymc.observed
        def colorPrior(value=0., M=Mags):
            lp = 0.
            for p in priors:
                color = M[model2index[p[0]]] - M[model2index[p[1]]]
                lp += p[2](color)
            return lp

        costs.append(colorPrior)

    def resid(p):
        model = numpy.empty(0)
        for key in filters:
            indx = key2index[key]
            if doSigma == True:
                sigma = sigmas[indx].value
            else:
                sigma = sigmas[key]
            simage = (image[key] / sigma)[mask_r]
            model = numpy.append(
                model,
                linearmodelSB(p,
                              simage,
                              sigma[mask_r],
                              mask,
                              models[key],
                              xc,
                              yc,
                              levMar=True,
                              OVRS=OVRS))
        return model

    print "Optimizing", niter
    from SampleOpt import AMAOpt as Opt, levMar as levMar
    default = numpy.empty(0)
    for key in filters:
        indx = key2index[key]
        if doSigma == True:
            sigma = sigmas[indx].value
        else:
            sigma = sigmas[key]
        simage = (image[key] / sigma)[mask_r]
        default = numpy.append(default, simage)
#    levMar(pars,resid,default)

    cov = None
    if 'COV' in data.keys():
        cov = data['COV']

    O = Opt(pars, costs, [lp, Mags], cov=cov)
    O.set_minprop(len(pars) * 2)
    O.sample(niter / 10)

    O = Opt(pars, costs, [lp, Mags], cov=cov)
    O.set_minprop(len(pars) * 2)
    O.cov = O.cov / 4.
    O.sample(niter / 4)

    O = Opt(pars, costs, [lp, Mags], cov=cov)
    O.set_minprop(len(pars) * 2)
    O.cov = O.cov / 10.
    O.sample(niter / 4)

    O = Opt(pars, costs, [lp, Mags], cov=cov)
    O.set_minprop(len(pars) * 2)
    O.cov = O.cov / 10.
    O.sample(niter)
    logp, trace, result = O.result()
    mags = numpy.array(result['Mags'])

    for key in model2index.keys():
        result[key] = mags[:, model2index[key]].copy()
    del result['Mags']

    output = {}
    for key in filters:
        indx = key2index[key]
        if doSigma == True:
            sigma = sigmas[indx].value
        else:
            sigma = sigmas[key]
        simage = (image[key] / sigma)[mask_r]
        m = linearmodelSB([p.value for p in pars],
                          simage,
                          sigma[mask_r],
                          mask,
                          models[key],
                          xc,
                          yc,
                          noResid=True,
                          OVRS=OVRS)
        output[key] = m
    return output, (logp, trace, result)
# ---- Example #4 (scraped-snippet separator; original score: 0) ----
    def fastMCMC(self,niter,nburn,nthin=1):
        """Sample stellar-population parameters plus lens/source masses.

        Builds a pymc posterior comparing model magnitudes (with free
        log-mass scalings for lens and source) to the photometric data,
        then runs alternating AMAOpt / Sampler stages: nburn worth of
        burn-in and adaptation, followed by niter production samples.
        Returns a dict with 'logP', 'logL' and one chain per parameter.
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()

        # Remove redshift dict
        for key in models.keys():
            if type(models[key])==type({}):
                z = models[key].keys()[0]
                models[key] = models[key][z]

        pars = [self.priors[key] for key in self.names]

        # Map model-grid axis names to indices, and build the permutation
        # between parameter order and grid ('cube') order; parameters whose
        # name starts with 'log' are exponentiated before evaluation.
        ax = {}
        doExp = []
        cube2par = []
        i = 0
        for key in self.model.axes_names:
            ax[key] = i
            i += 1
        i = 0
        for key in self.names:
            if key[0]=='X':
                continue
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        # add stellar mass parameters
        pars.append(pymc.Uniform('log_Mlens',9.,12.))
        pars.append(pymc.Uniform('log_Msrc',9.,12.))

        M = numpy.empty(len(filters))
        D = numpy.empty(len(filters))
        V = numpy.empty(len(filters))
        for i in range(D.size):
            f = filters[i]
            D[i] = data[f]['mag']
            V[i] = data[f]['sigma']**2
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            # NOTE(review): the len(D[i]) branches below treat D[i] and M[i]
            # as sequences although M, D are 1-D float arrays here — this
            # path looks broken as written; confirm against the original
            # data layout before relying on it.
            logp = 0
            p = numpy.array(pars[:-2])
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mlens, msrc = pars[-2],pars[-1]
            for i in range(M.size):
                filt = filters[i]
                M[i] = models[filt].eval(p)
                if M[i]==0:
                    # off-grid evaluation: return a crippling log-likelihood
                    return [-1.,-1e300]
                if len(D[i])==1:
                    # sdss magnitude
                    # BUGFIX: was `Msrc` (undefined; local is `msrc`)
                    ml,ms = M[i][0] - 2.5*mlens, M[i][1] - 2.5*msrc
                    f = 10**(-0.4*ml) + 10**(-0.4*ms)
                    # BUGFIX: was `np.log10` but the module is imported as
                    # `numpy` in this function
                    f = -2.5*numpy.log10(f)
                    logp += -0.5*(f-D[i])**2./V[i]
                elif len(D[i]) ==2:
                    # HST/Keck magnitude
                    logp += -0.5*(M[i][0] - 2.5*mlens - D[i][0])**2./V[i][0] - 0.5*(M[i][1] - 2.5*msrc - D[i][1])**2./V[i][1]
            return logp

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp

        # Hand-tuned proposal widths per parameter type.
        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='tauV':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtauV':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                # Width derived from the prior's own scale.
                P = self.priors['redshift']
                if type(P)==type(pymc.Normal('t',0.,1)):
                    cov.append(P.parents['tau']**-0.5)
                elif type(P)==type(pymc.Uniform('t',0.,1.)):
                    cov.append((P.parents['upper']-P.parents['lower'])/10.)
                else:
                    cov.append(P.parents['cov'])
                #cov.append(0.1)
        cov += [0.5,0.5] # masses
        cov = numpy.array(cov)

        # Burn-in: optimizer stage, then samplers, re-estimating the
        # proposal covariance from the chain as it adapts.
        costs = self.constraints+[loglikelihood]
        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.)
        S.sample(nburn/2)
        logps,trace,dets = S.result()

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        # Production run.
        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        logL = dets['mass_and_logp'].T
        o = {'logP':logps,'logL':logL}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
# Fragment: observed-likelihood shim that simply forwards the deterministic
# log-probability `logP` (defined outside this fragment).
def likelihood(value=0.,lp=logP):
    return lp

# Fragment: levMar residual stub — a zero image offset by -2*logP.
# Relies on `logP` and `self` from the surrounding (not visible) scope;
# the parameter vector `p` is unused.
def resid(p):
    lp = -2*logP.value
    return self.imgs[0].ravel()*0 + lp

# Script fragment: three-stage AMAOpt run with shrinking proposal scales.
# Default the proposal covariance to the collected per-parameter widths.
optCov = None
if optCov is None:
    optCov = numpy.array(cov)

#S = levMar(pars,resid)
#self.outPars = pars
#return
# Single pass; each stage draws a quadratic-in-len(pars) number of samples.
for i in range(1):
    S = AMAOpt(pars,[likelihood],[logP],cov=optCov/4.)
    S.set_minprop(len(pars)*2)
    S.sample(8*len(pars)**2)

    S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.)
    S.set_minprop(len(pars)*2)
    S.sample(10*len(pars)**2)

    S = AMAOpt(pars,[likelihood],[logP],cov=optCov/8.)
    S.set_minprop(len(pars)*2)
    S.sample(10*len(pars)**2)


# Chain from the last stage: log-likelihoods, trace, deterministics.
logp,trace,det = S.result()
coeff = []
for i in range(len(pars)):
# ---- Example #6 (scraped-snippet separator; original score: 0) ----
    def fastMCMC(self,niter,nburn,nthin=1):
        """Sample stellar-population parameters with an analytic mass scale.

        For each proposed parameter vector the best-fit magnitude offset m
        (a stellar-mass proxy) is solved in closed form, so only the shape
        parameters are sampled.  Runs AMAOpt/Sampler burn-in stages of
        total length ~2*nburn, then niter production samples.  Returns a
        dict with 'logP', 'logL', 'logmass' and one chain per parameter.
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        # Two model formats: 'new' stores per-filter models, otherwise a
        # single model object evaluated with (filter, redshift).
        if self.format=='new':
            models = self.model.models
        else:
            models = self.model
        data = self.data
        filters = data.keys()

        pars = [self.priors[key] for key in self.names]

        # Map grid-axis names to indices and build the permutation between
        # parameter order and grid order; 'log*' parameters are
        # exponentiated before grid evaluation.
        ax = {}
        doExp = []
        cube2par = []
        i = 0
        for key in self.model.axes_names:
            ax[key] = i
            i += 1
        i = 0
        for key in self.names:
            if key[0]=='X':
                continue
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        # Per-filter data magnitudes and variances.
        M = numpy.empty(len(filters))
        D = numpy.empty(len(filters))
        V = numpy.empty(len(filters))
        for i in range(D.size):
            f = filters[i]
            D[i] = data[f]['mag']
            V[i] = data[f]['sigma']**2
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            for i in range(M.size):
                filt = filters[i]
                if self.format=='new':
                    M[i] = models[filt].eval(p)
                else:
                    M[i] = models.eval(p,filt,data[filt]['redshift'])
                if M[i]==0:
                    # off-grid point: crippling log-likelihood
                    return [-1.,-1e300]
            # Closed-form weighted solution for the magnitude offset m,
            # then a Gaussian log-likelihood at that optimum.
            m = ((M-D)/V).sum()/(2.5/V).sum()
            logp = -0.5*((M-2.5*m-D)**2/V).sum()
            return [m,logp]

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp[1]

        # Hand-tuned proposal widths per parameter type.
        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                # Width derived from the prior's own scale.
                P = self.priors['redshift']
                if type(P)==type(pymc.Normal('t',0.,1)):
                    cov.append(P.parents['tau']**-0.5)
                elif type(P)==type(pymc.Uniform('t',0.,1.)):
                    cov.append((P.parents['upper']-P.parents['lower'])/10.)
                else:
                    cov.append(P.parents['cov'])
                #cov.append(0.1)
        cov = numpy.array(cov)

        # Burn-in: optimizer then samplers, re-estimating the proposal
        # covariance from the chain as it adapts.
        costs = self.constraints+[loglikelihood]
        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.)
        S.sample(nburn/2)
        logps,trace,dets = S.result()

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        # Production run.
        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        mass,logL = dets['mass_and_logp'].T
        o = {'logP':logps,'logL':logL,'logmass':mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
# ---- Example #7 (scraped-snippet separator; original score: 0) ----
    def fastMCMC(self,niter,nburn,nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()
        t = time.time()
        T1 = models[filters[0]]*0.
        T2 = 0.
        for f in filters:
            T1 += (models[f]-data[f]['mag'])/data[f]['sigma']**2
            T2 += 2.5/self.data[f]['sigma']**2
        M = T1/T2
        logp = 0.
        for f in filters:
            logp += -0.5*(-2.5*M+models[f]-data[f]['mag'])**2/data[f]['sigma']**2
        t = time.time()
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(),logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a,numpy.arange(a.size),k=1,s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print logp.max()
        logpmodel = ndinterp.ndInterp(axes,logp,order=1)
        massmodel = ndinterp.ndInterp(axes,M,order=1)

        pars = [self.priors[key] for key in self.names]

        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            axarr.append(numpy.rollaxis(a+p0*0,p.ndim-1,i))
            i += 1
        cov = numpy.empty((p.ndim,p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i,p.ndim):
                l = cube2par[j]
                cov[i,j] = (p*(axarr[k]-wmean[k])*(axarr[l]-wmean[l])).sum()
                cov[j,i] = cov[i,j]
        cov /= 1.-(p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors),1))
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass==0.:
                return [0.,-1e200]
            logp = logpmodel.eval(p)
            return [mass,logp]

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,[loglikelihood],[mass_and_logp],cov=cov)
        S.sample(nburn)
        logps,trace,dets = S.result()
        print logps.max()

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        mass,logL = dets['mass_and_logp'][:,:,0].T
        o = {'logP':logps,'logL':logL,'logmass':mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
        
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print p.max()
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key=='redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p,i,p.ndim)
            print key,(a*p0).sum()
            i += 1

        print numpy.unravel_index(arg,logp.shape)
        logp -= max
        print (M*numpy.exp(logp)).sum()/numpy.exp(logp).sum()
        z = (M*0.+1)*self.model.axes['redshift']['points'][::5]
        print (z*numpy.exp(logp)).sum()/numpy.exp(logp).sum()
        f = open('check','wb')
        import cPickle
        cPickle.dump([M,logp],f,2)
        f.close()
        mod = ndinterp.ndInterp(self.models.axes,logp)
# ---- Example #8 (scraped-snippet separator; original score: 0) ----
    def fastMCMC(self, niter, nburn, nthin=1):
        """Grid-interpolated MCMC over stellar-population parameters.

        Pre-computes the analytic mass scaling M and the grid-wide
        log-likelihood, wraps both in linear ndInterp interpolators, and
        samples the interpolated posterior (AMAOpt burn-in, then two
        Sampler stages).  Returns a dict with 'logP', 'logL', 'logmass'
        and one chain per parameter.

        NOTE(review): everything after the `return o` below is unreachable
        scratch code (it also contains `logp -= max`, subtracting the
        builtin); candidate for deletion.
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc, numpy, time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()
        t = time.time()
        # Closed-form weighted magnitude offset M across all filters and
        # the corresponding log-likelihood, evaluated on the whole grid.
        T1 = models[filters[0]] * 0.
        T2 = 0.
        for f in filters:
            T1 += (models[f] - data[f]['mag']) / data[f]['sigma']**2
            T2 += 2.5 / self.data[f]['sigma']**2
        M = T1 / T2
        logp = 0.
        for f in filters:
            logp += -0.5 * (-2.5 * M + models[f] -
                            data[f]['mag'])**2 / data[f]['sigma']**2
        t = time.time()
        # Linear splines mapping axis values to grid indices, plus the
        # coordinates of the maximum-likelihood grid point.
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(), logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a, numpy.arange(a.size), k=1, s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print logp.max()
        logpmodel = ndinterp.ndInterp(axes, logp, order=1)
        massmodel = ndinterp.ndInterp(axes, M, order=1)

        pars = [self.priors[key] for key in self.names]

        # Permutation between parameter order and grid order; 'log*'
        # parameters are exponentiated before interpolation.
        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log') == 0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp) == True
        par2cube = numpy.argsort(cube2par)

        # Posterior-weighted means and covariance over the grid, used as
        # the initial proposal covariance (permuted to parameter order).
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p, i, p.ndim)
            wmean[i] = (a * p0).sum()
            axarr.append(numpy.rollaxis(a + p0 * 0, p.ndim - 1, i))
            i += 1
        cov = numpy.empty((p.ndim, p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i, p.ndim):
                l = cube2par[j]
                cov[i, j] = (p * (axarr[k] - wmean[k]) *
                             (axarr[l] - wmean[l])).sum()
                cov[j, i] = cov[i, j]
        cov /= 1. - (p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors), 1))

        @pymc.deterministic
        def mass_and_logp(value=0., pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass == 0.:
                # off-grid point: crippling log-likelihood
                return [0., -1e200]
            logp = logpmodel.eval(p)
            return [mass, logp]

        @pymc.observed
        def loglikelihood(value=0., lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        # Burn-in with the grid-derived covariance, re-adapt from the
        # chain, then draw the production samples.
        from SampleOpt import Sampler, AMAOpt
        S = AMAOpt(pars, [loglikelihood], [mass_and_logp], cov=cov)
        S.sample(nburn)
        logps, trace, dets = S.result()
        print logps.max()

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn / 2)

        logps, trace, dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps, trace, dets = S.result()
        mass, logL = dets['mass_and_logp'][:, :, 0].T
        o = {'logP': logps, 'logL': logL, 'logmass': mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o

        # --- unreachable scratch code below (kept byte-identical) ---
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print p.max()
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key == 'redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p, i, p.ndim)
            print key, (a * p0).sum()
            i += 1

        print numpy.unravel_index(arg, logp.shape)
        logp -= max
        print(M * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        z = (M * 0. + 1) * self.model.axes['redshift']['points'][::5]
        print(z * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        f = open('check', 'wb')
        import cPickle
        cPickle.dump([M, logp], f, 2)
        f.close()
        mod = ndinterp.ndInterp(self.models.axes, logp)