Code Example #1
    def fastMCMC(self,niter,nburn,nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()

        # Collapse {redshift: model} dicts to a single model (first redshift key)
        for key in models.keys():
            if type(models[key])==type({}):
                z = models[key].keys()[0]
                models[key] = models[key][z]

        pars = [self.priors[key] for key in self.names]

        ax = {}
        doExp = []
        cube2par = []
        i = 0
        for key in self.model.axes_names:
            ax[key] = i
            i += 1
        i = 0
        for key in self.names:
            if key[0]=='X':
                continue
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        # cube2par[i] is the model-axis index of the i-th sampled parameter;
        # par2cube is the inverse permutation, so p[par2cube] reorders a
        # parameter vector into the model's axis order before interpolation.
        par2cube = numpy.argsort(cube2par)

        # add stellar mass parameters
        pars.append(pymc.Uniform('log_Mlens',9.,12.))
        pars.append(pymc.Uniform('log_Msrc',9.,12.))

        # Per-filter model and observed magnitudes plus variances; lists are used
        # because a filter may carry either one (unresolved) or two (resolved
        # lens+source) magnitudes.
        M = [None for f in filters]
        D = []
        V = []
        for i in range(len(filters)):
            f = filters[i]
            D.append(numpy.atleast_1d(data[f]['mag']))
            V.append(numpy.atleast_1d(data[f]['sigma'])**2)
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            logp = 0.
            p = numpy.array(pars[:-2])
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mlens,msrc = pars[-2],pars[-1]
            for i in range(len(filters)):
                filt = filters[i]
                M[i] = models[filt].eval(p)
                if numpy.all(M[i]==0):
                    return -1e300
                if len(D[i])==1:
                    # unresolved (e.g. SDSS) photometry: blend lens and source fluxes
                    ml,ms = M[i][0]-2.5*mlens,M[i][1]-2.5*msrc
                    f = 10**(-0.4*ml) + 10**(-0.4*ms)
                    f = -2.5*numpy.log10(f)
                    logp += -0.5*(f-D[i][0])**2/V[i][0]
                elif len(D[i])==2:
                    # resolved (e.g. HST/Keck) photometry: lens and source measured separately
                    logp += -0.5*(M[i][0]-2.5*mlens-D[i][0])**2/V[i][0] - 0.5*(M[i][1]-2.5*msrc-D[i][1])**2/V[i][1]
            return logp

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='tauV':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtauV':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                P = self.priors['redshift']
                if type(P)==type(pymc.Normal('t',0.,1)):
                    cov.append(P.parents['tau']**-0.5)
                elif type(P)==type(pymc.Uniform('t',0.,1.)):
                    cov.append((P.parents['upper']-P.parents['lower'])/10.)
                else:
                    cov.append(P.parents['cov'])
                #cov.append(0.1)
        cov += [0.5,0.5] # masses
        cov = numpy.array(cov)

        costs = self.constraints+[loglikelihood]
        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.)
        S.sample(nburn/2)
        logps,trace,dets = S.result()

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        logL = dets['mass_and_logp'].T
        o = {'logP':logps,'logL':logL}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
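In the unresolved branch of mass_and_logp above, the lens and source models are combined by adding their fluxes and converting the sum back to a single blended magnitude before comparing with the data. A minimal illustration of that step, with made-up magnitudes rather than anything from the source:

import numpy

ml, ms = 18.3, 20.1                    # illustrative lens and source magnitudes
flux = 10**(-0.4*ml) + 10**(-0.4*ms)   # magnitudes -> fluxes, then add
mtot = -2.5*numpy.log10(flux)          # back to a single blended magnitude
# The blend is brighter (numerically smaller) than either component alone.
assert mtot < min(ml, ms)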
Code Example #2
File: zmass_estimator.py  Project: bnord/LensPop
    def fastMCMC(self,niter,nburn,nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()
        t = time.time()
        # Closed-form weighted least-squares magnitude offset: M is the value for
        # which the models shifted by -2.5*M best match the data, computed at every
        # grid point simultaneously.
        T1 = models[filters[0]]*0.
        T2 = 0.
        for f in filters:
            T1 += (models[f]-data[f]['mag'])/data[f]['sigma']**2
            T2 += 2.5/self.data[f]['sigma']**2
        M = T1/T2
        # Log-likelihood of the shifted models on the same grid.
        logp = 0.
        for f in filters:
            logp += -0.5*(-2.5*M+models[f]-data[f]['mag'])**2/data[f]['sigma']**2
        t = time.time()
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(),logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a,numpy.arange(a.size),k=1,s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print logp.max()
        logpmodel = ndinterp.ndInterp(axes,logp,order=1)
        massmodel = ndinterp.ndInterp(axes,M,order=1)

        pars = [self.priors[key] for key in self.names]

        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        # Turn the grid log-likelihood into a normalized probability and compute its
        # weighted mean and covariance (in sampled-parameter order); the covariance
        # seeds the samplers' proposal distribution below.
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            axarr.append(numpy.rollaxis(a+p0*0,p.ndim-1,i))
            i += 1
        cov = numpy.empty((p.ndim,p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i,p.ndim):
                l = cube2par[j]
                cov[i,j] = (p*(axarr[k]-wmean[k])*(axarr[l]-wmean[l])).sum()
                cov[j,i] = cov[i,j]
        cov /= 1.-(p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors),1))
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass==0.:
                return [0.,-1e200]
            logp = logpmodel.eval(p)
            return [mass,logp]

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,[loglikelihood],[mass_and_logp],cov=cov)
        S.sample(nburn)
        logps,trace,dets = S.result()
        print logps.max()

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        mass,logL = dets['mass_and_logp'][:,:,0].T
        o = {'logP':logps,'logL':logL,'logmass':mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
        
        # Unreachable leftover debugging code (everything below follows the return above).
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print p.max()
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key=='redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p,i,p.ndim)
            print key,(a*p0).sum()
            i += 1

        print numpy.unravel_index(arg,logp.shape)
        logp -= logp.max()
        print (M*numpy.exp(logp)).sum()/numpy.exp(logp).sum()
        z = (M*0.+1)*self.model.axes['redshift']['points'][::5]
        print (z*numpy.exp(logp)).sum()/numpy.exp(logp).sum()
        f = open('check','wb')
        import cPickle
        cPickle.dump([M,logp],f,2)
        f.close()
        mod = ndinterp.ndInterp(self.model.axes,logp)
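The T1/T2 grid computation at the top of Code Example #2 (and the one-line version m = ((M-D)/V).sum()/(2.5/V).sum() in Code Example #3 below) is the closed-form weighted least-squares solution for the magnitude offset that best matches the photometry. A small self-contained check of that algebra, using made-up numbers rather than values from the source:

import numpy

# Illustrative magnitudes and variances for three filters (made-up values).
model = numpy.array([19.2, 18.7, 18.1])   # template magnitudes
mag = numpy.array([17.0, 16.4, 15.9])     # observed magnitudes
var = numpy.array([0.04, 0.02, 0.03])     # per-filter sigma**2

# Closed-form offset: the same algebra as T1/T2 (or m) in the examples.
m = ((model - mag)/var).sum()/(2.5/var).sum()

# Brute-force check that m minimizes chi^2(x) = sum((model - 2.5*x - mag)**2/var).
grid = numpy.linspace(m - 1., m + 1., 4001)
chi2 = ((model[None, :] - 2.5*grid[:, None] - mag[None, :])**2/var[None, :]).sum(1)
assert abs(grid[chi2.argmin()] - m) < 1e-3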
Code Example #3
    def fastMCMC(self,niter,nburn,nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        if self.format=='new':
            models = self.model.models
        else:
            models = self.model
        data = self.data
        filters = data.keys()

        pars = [self.priors[key] for key in self.names]

        ax = {}
        doExp = []
        cube2par = []
        i = 0
        for key in self.model.axes_names:
            ax[key] = i
            i += 1
        i = 0
        for key in self.names:
            if key[0]=='X':
                continue
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        M = numpy.empty(len(filters))
        D = numpy.empty(len(filters))
        V = numpy.empty(len(filters))
        for i in range(D.size):
            f = filters[i]
            D[i] = data[f]['mag']
            V[i] = data[f]['sigma']**2
        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            for i in range(M.size):
                filt = filters[i]
                if self.format=='new':
                    M[i] = models[filt].eval(p)
                else:
                    M[i] = models.eval(p,filt,data[filt]['redshift'])
                if M[i]==0:
                    return [-1.,-1e300]
            # Closed-form weighted least-squares magnitude offset m (stored as
            # 'logmass' below) and the corresponding chi^2-based log-likelihood.
            m = ((M-D)/V).sum()/(2.5/V).sum()
            logp = -0.5*((M-2.5*m-D)**2/V).sum()
            return [m,logp]

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp[1]

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                P = self.priors['redshift']
                if type(P)==type(pymc.Normal('t',0.,1)):
                    cov.append(P.parents['tau']**-0.5)
                elif type(P)==type(pymc.Uniform('t',0.,1.)):
                    cov.append((P.parents['upper']-P.parents['lower'])/10.)
                else:
                    cov.append(P.parents['cov'])
                #cov.append(0.1)
        cov = numpy.array(cov)

        costs = self.constraints+[loglikelihood]
        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/4)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = AMAOpt(pars,costs,[mass_and_logp],cov=cov/4.)
        S.sample(nburn/2)
        logps,trace,dets = S.result()

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn/2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace[nburn/4:].T)

        S = Sampler(pars,costs,[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        mass,logL = dets['mass_and_logp'].T
        o = {'logP':logps,'logL':logL,'logmass':mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
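Each variant returns a dictionary of flattened chains: 'logP' (the log-probability trace), 'logL' (the tracked likelihood term), 'logmass' where it is computed, and one array per sampled parameter name. A minimal consumption sketch, assuming a hypothetical estimator instance est that defines this method and a sampled parameter named 'redshift':

import numpy

# `est` and the 'redshift' key are illustrative assumptions, not from the source.
out = est.fastMCMC(niter=20000, nburn=4000)
best = out['logP'].argmax()              # index of the highest-probability sample
mbest = out['logmass'][best]             # 'logmass' entry at that sample
zmed = numpy.median(out['redshift'])     # posterior median redshift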
Code Example #4
File: zmass_estimator.py  Project: peterrwi/LensPop
    def fastMCMC(self, niter, nburn, nthin=1):
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc, numpy, time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()
        t = time.time()
        T1 = models[filters[0]] * 0.
        T2 = 0.
        for f in filters:
            T1 += (models[f] - data[f]['mag']) / data[f]['sigma']**2
            T2 += 2.5 / self.data[f]['sigma']**2
        M = T1 / T2
        logp = 0.
        for f in filters:
            logp += -0.5 * (-2.5 * M + models[f] -
                            data[f]['mag'])**2 / data[f]['sigma']**2
        t = time.time()
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(), logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a, numpy.arange(a.size), k=1, s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print logp.max()
        logpmodel = ndinterp.ndInterp(axes, logp, order=1)
        massmodel = ndinterp.ndInterp(axes, M, order=1)

        pars = [self.priors[key] for key in self.names]

        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log') == 0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp) == True
        par2cube = numpy.argsort(cube2par)

        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p, i, p.ndim)
            wmean[i] = (a * p0).sum()
            axarr.append(numpy.rollaxis(a + p0 * 0, p.ndim - 1, i))
            i += 1
        cov = numpy.empty((p.ndim, p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i, p.ndim):
                l = cube2par[j]
                cov[i, j] = (p * (axarr[k] - wmean[k]) *
                             (axarr[l] - wmean[l])).sum()
                cov[j, i] = cov[i, j]
        cov /= 1. - (p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors), 1))

        @pymc.deterministic
        def mass_and_logp(value=0., pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass == 0.:
                return [0., -1e200]
            logp = logpmodel.eval(p)
            return [mass, logp]

        @pymc.observed
        def loglikelihood(value=0., lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        from SampleOpt import Sampler, AMAOpt
        S = AMAOpt(pars, [loglikelihood], [mass_and_logp], cov=cov)
        S.sample(nburn)
        logps, trace, dets = S.result()
        print logps.max()

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn / 2)

        logps, trace, dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps, trace, dets = S.result()
        mass, logL = dets['mass_and_logp'][:, :, 0].T
        o = {'logP': logps, 'logL': logL, 'logmass': mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o

        # Unreachable leftover debugging code (everything below follows the return above).
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print p.max()
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key == 'redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p, i, p.ndim)
            print key, (a * p0).sum()
            i += 1

        print numpy.unravel_index(arg, logp.shape)
        logp -= logp.max()
        print (M * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        z = (M * 0. + 1) * self.model.axes['redshift']['points'][::5]
        print (z * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        f = open('check', 'wb')
        import cPickle
        cPickle.dump([M, logp], f, 2)
        f.close()
        mod = ndinterp.ndInterp(self.model.axes, logp)