Exemple #1
0
def make_M2d_grid(Nr=100,
                  Nalpha=9,
                  Rmin=0.01,
                  Rmax=100.,
                  alpha_min=0.1,
                  alpha_max=0.5):
    """Tabulate the Einasto enclosed projected mass M2d on an (alpha, R) grid.

    R runs logarithmically from Rmin to Rmax (Nr points) and alpha
    linearly from alpha_min to alpha_max (Nalpha points).  The resulting
    ndInterp spline object is pickled to grid_dir/einasto_M2d_grid.dat.
    """
    print('calculating grid of enclosed projected masses...')
    import ndinterp
    R_grid = np.logspace(np.log10(Rmin), np.log10(Rmax), Nr)
    R_spline = splrep(R_grid, np.arange(Nr))
    alpha_grid = np.linspace(alpha_min, alpha_max, Nalpha)
    alpha_spline = splrep(alpha_grid, np.arange(Nalpha))
    axes = {0: alpha_spline, 1: R_spline}

    M2d_grid = np.empty((Nalpha, Nr))

    for i in range(Nalpha):
        print('alpha %3.2f' % alpha_grid[i])
        for j in range(Nr):
            # rs is fixed to 1: radii are in units of the scale radius
            M2d_grid[i, j] = M2d(R_grid[j], 1., alpha_grid[i])
    thing = ndinterp.ndInterp(axes, M2d_grid, order=3)
    # pickle needs a binary stream; text-mode 'w' breaks under Python 3
    with open(grid_dir + '/einasto_M2d_grid.dat', 'wb') as f:
        pickle.dump(thing, f)
Exemple #2
0
def make_Sigma_grid(Nr=100,
                    Nalpha=9,
                    Rmin=0.01,
                    Rmax=100.,
                    alpha_min=0.1,
                    alpha_max=0.5):
    """Tabulate the Einasto surface mass density Sigma on an (alpha, R) grid.

    R runs logarithmically from Rmin to Rmax (Nr points) and alpha
    linearly from alpha_min to alpha_max (Nalpha points).  The resulting
    ndInterp spline object is pickled to grid_dir/einasto_Sigma_grid.dat.
    """
    print('calculating grid of surface mass density...')
    import ndinterp
    R_grid = np.logspace(np.log10(Rmin), np.log10(Rmax), Nr)
    R_spline = splrep(R_grid, np.arange(Nr))
    alpha_grid = np.linspace(alpha_min, alpha_max, Nalpha)
    alpha_spline = splrep(alpha_grid, np.arange(Nalpha))
    axes = {0: alpha_spline, 1: R_spline}

    Sigma_grid = np.empty((Nalpha, Nr))

    for i in range(Nalpha):
        print('alpha: %3.2f' % alpha_grid[i])
        for j in range(Nr):
            # rs is fixed to 1: radii are in units of the scale radius
            Sigma_grid[i, j] = Sigma(R_grid[j], 1., alpha_grid[i])
    thing = ndinterp.ndInterp(axes, Sigma_grid, order=3)
    # context manager guarantees the file is closed even on error
    with open(grid_dir + '/einasto_Sigma_grid.dat', 'wb') as f:
        pickle.dump(thing, f)
Exemple #3
0
    def create_grid(self, x, y, q, eta):
        """Tabulate deflection angles on the (x, y, q, eta) grid and
        store the resulting ndInterp models in self.xmodel / self.ymodel.
        """
        import numpy

        nx, ny = x.size, y.size
        xgrid = numpy.empty((nx, ny, q.size, eta.size))
        ygrid = xgrid.copy()

        # flattened (x, y) sample points: x varies slowest, y fastest
        xflat = x.repeat(ny)
        yflat = y.repeat(nx).reshape((ny, nx)).T.flatten()

        for iq, qval in enumerate(q):
            self.q = qval
            for ie, etaval in enumerate(eta):
                self.eta = etaval
                defl_x, defl_y = self.deflection_angles(xflat, yflat)
                xgrid[:, :, iq, ie] = defl_x.reshape((nx, ny))
                ygrid[:, :, iq, ie] = defl_y.reshape((nx, ny))

        axis_list = [x, y, q, eta]
        alphax = ndInterp()
        alphax.ndI_setup(axis_list, xgrid)
        alphay = ndInterp()
        alphay.ndI_setup(axis_list, ygrid)

        self.xmodel = alphax
        self.ymodel = alphay
Exemple #4
0
    def create_grid(self, x, y, q, eta):
        """Build deflection-angle lookup tables over the (x, y, q, eta)
        axes and attach them as self.xmodel / self.ymodel."""
        import numpy

        grid_shape = (x.size, y.size, q.size, eta.size)
        defl_x = numpy.empty(grid_shape)
        defl_y = numpy.empty(grid_shape)

        plane = (x.size, y.size)
        # flattened coordinate pairs: x repeated blockwise, y tiled
        xs = x.repeat(y.size)
        ys = y.repeat(x.size).reshape((y.size, x.size)).T.flatten()

        for i in range(q.size):
            self.q = q[i]
            for j in range(eta.size):
                self.eta = eta[j]
                dx, dy = self.deflection_angles(xs, ys)
                defl_x[:, :, i, j] = dx.reshape(plane)
                defl_y[:, :, i, j] = dy.reshape(plane)

        axis_list = [x, y, q, eta]
        model_x = ndInterp()
        model_x.ndI_setup(axis_list, defl_x)
        model_y = ndInterp()
        model_y.ndI_setup(axis_list, defl_y)

        self.xmodel = model_x
        self.ymodel = model_y
Exemple #5
0
    def create_models(self):
        """Build one magnitude interpolator per filter.

        Reshapes the stored spectra to (nmodels, nwave), applies the
        luminosity correction, computes AB magnitudes through every
        filter at self.redshift, and returns a dict mapping filter name
        to an ndInterp model over the parameter axes.
        """
        import numpy
        from stellarpop import tools
        from ndinterp import ndInterp

        wave = self.axes[2]
        self.axes_names = self.axes[1]
        # '//' keeps the model count an int on Python 3 (plain '/'
        # would yield a float and break the reshape below)
        nmodels = self.spex.size // wave.size
        spex = self.spex.reshape(nmodels, wave.size)
        spex = spex * self.luminosity_correction()

        axes = self.axes[0]

        out = {}
        for F in self.filter_names:
            filt = self.filters[F]
            mags = numpy.zeros(nmodels)
            for i in range(nmodels):
                mags[i] = tools.ABFM(filt, [wave, spex[i]], self.redshift)
            # back to the grid shape (all axes except wavelength)
            mags = mags.reshape(self.spex.shape[:-1])
            out[F] = ndInterp(axes, mags)
        return out
Exemple #6
0
    def create_models(self):
        """Build one magnitude interpolator per filter.

        Reshapes the stored spectra to (nmodels, nwave), applies the
        luminosity correction, computes AB magnitudes through every
        filter at self.redshift, and returns a dict mapping filter name
        to an ndInterp model over the parameter axes.
        """
        import numpy
        from stellarpop import tools
        from ndinterp import ndInterp

        wave = self.axes[2]
        self.axes_names = self.axes[1]
        # '//' keeps the model count an int on Python 3 (plain '/'
        # would yield a float and break the reshape below)
        nmodels = self.spex.size // wave.size
        spex = self.spex.reshape(nmodels, wave.size)
        spex = spex * self.luminosity_correction()

        axes = self.axes[0]

        out = {}
        for F in self.filter_names:
            filt = self.filters[F]
            mags = numpy.zeros(nmodels)
            for i in range(nmodels):
                mags[i] = tools.ABFM(filt, [wave, spex[i]], self.redshift)
            # back to the grid shape (all axes except wavelength)
            mags = mags.reshape(self.spex.shape[:-1])
            out[F] = ndInterp(axes, mags)
        return out
Exemple #7
0
def make_M2dRbetam3_grid(Nr=100, Nb=28, Rmin=0.1, Rmax=100.):
    """Tabulate M2d(R, rs=rsgrid, beta) * R**(3-beta) on an (R, beta) grid.

    R runs logarithmically from Rmin to Rmax (Nr points) and the inner
    slope beta linearly from 0.1 to 2.8 (Nb points).  Multiplying by
    R**(3-beta) flattens the function, which makes the spline
    interpolation more accurate.  The resulting ndInterp object is
    pickled to gNFW_rs<rsgrid>_M2d_grid.dat.
    """
    print('calculating grid of enclosed projected masses...')
    import ndinterp
    reins = np.logspace(np.log10(Rmin), np.log10(Rmax), Nr)
    spl_rein = splrep(reins, np.arange(Nr))
    betas = np.linspace(0.1, 2.8, Nb)
    spl_beta = splrep(betas, np.arange(Nb))
    axes = {0: spl_beta, 1: spl_rein}

    R, B = np.meshgrid(reins, betas)
    M2d_grid = np.empty((Nb, Nr))

    for i in range(Nb):
        print('inner slope %4.2f' % betas[i])
        for j in range(Nr):
            M2d_grid[i, j] = M2d(reins[j], rsgrid, betas[i])
    # store the flattened quantity M2d * R**(beta-3)... i.e. divide out
    # the steep R**(3-beta) behavior before spline fitting
    thing = ndinterp.ndInterp(axes, M2d_grid * R**(B - 3.), order=3)
    # pickle needs a binary stream; text-mode 'w' breaks under Python 3
    with open('gNFW_rs%d_M2d_grid.dat' % int(rsgrid), 'wb') as f:
        pickle.dump(thing, f)
Exemple #8
0
def make_lenspot_grid(Nr=100, Nn=15, Rmin=0.01, Rmax=10.):
    """Tabulate the Sersic lensing potential psi(R, n) on an (R, n) grid.

    R runs logarithmically from Rmin to Rmax (Nr points) and the Sersic
    index n linearly from 1 to 8 (Nn points).  The resulting ndInterp
    object is pickled to sersic_lenspot_grid.dat.

    (The original header comment described a gNFW beta grid; it was a
    stale copy-paste from a different function.)
    """
    print('calculating grid of lensing potential...')
    import ndinterp
    reins = np.logspace(np.log10(Rmin), np.log10(Rmax), Nr)
    spl_rein = splrep(reins, np.arange(Nr))
    ns = np.linspace(1.0, 8.0, Nn)
    spl_n = splrep(ns, np.arange(Nn))
    axes = {0: spl_n, 1: spl_rein}

    pot_grid = np.empty((Nn, Nr))

    for i in range(Nn):
        print('sersic index %4.2f' % ns[i])
        for j in range(Nr):
            # last argument (1.) presumably a unit normalization -- the
            # signature of lenspot is defined elsewhere
            pot_grid[i, j] = lenspot(reins[j], ns[i], 1.)
    thing = ndinterp.ndInterp(axes, pot_grid, order=3)
    # pickle needs a binary stream; text-mode 'w' breaks under Python 3
    with open('sersic_lenspot_grid.dat', 'wb') as f:
        pickle.dump(thing, f)
Exemple #9
0
def make_M2d_Rbetam3_grid(Nr=100, Nb=28, Rmin=0.01, Rmax=100.):
    """Tabulate M2d(R, rs=1, beta) * R**(3-beta) on an (R, beta) grid.

    R runs logarithmically from Rmin to Rmax (Nr points) and the inner
    slope beta linearly from 0.1 to 2.8 (Nb points).  Multiplying by
    R**(3-beta) flattens the function, which makes the spline
    interpolation more accurate.  The resulting ndInterp object is
    pickled to grid_dir/gNFW_M2d_Rbetam3_grid.dat.
    """
    print('calculating grid of enclosed projected masses...')
    import ndinterp
    R_grid = np.logspace(np.log10(Rmin), np.log10(Rmax), Nr)
    R_spline = splrep(R_grid, np.arange(Nr))
    beta_grid = np.linspace(0.1, 2.8, Nb)
    beta_spline = splrep(beta_grid, np.arange(Nb))
    axes = {0: beta_spline, 1: R_spline}

    R, B = np.meshgrid(R_grid, beta_grid)
    M2d_grid = np.empty((Nb, Nr))

    for i in range(Nb):
        print('inner slope %4.2f' % beta_grid[i])
        for j in range(Nr):
            # rs fixed to 1: radii are in units of the scale radius
            M2d_grid[i, j] = M2d(R_grid[j], 1., beta_grid[i])
    thing = ndinterp.ndInterp(axes, M2d_grid * R**(B - 3.), order=3)
    # pickle needs a binary stream; text-mode 'w' breaks under Python 3
    with open(grid_dir + '/gNFW_M2d_Rbetam3_grid.dat', 'wb') as f:
        pickle.dump(thing, f)
Exemple #10
0
# grids of Sersic index (n0) and axis ratio (q0) for the deflection tables
n0 = numpy.linspace(0.5,6.,12)
q0 = numpy.linspace(0.1,1.,19)

# 4-d broadcasted coordinate/parameter arrays over (x, y, n, q);
# x0 is defined earlier in the file (out of this view)
x,y,n,q = ndinterp.create_axes_array([x0,x0,n0,q0])
yout = x*0.
xout = y*0.
for i in range(x.shape[2]):
    for j in range(x.shape[3]):
        X = x[:,:,i,j]
        Y = y[:,:,i,j]
        N = n[0,0,i,j]
        Q = q[0,0,i,j]
        # series approximation of the Sersic b_n parameter
        # (Ciotti & Bertin-style asymptotic expansion)
        k = 2.*N-1./3+4./(405.*N)+46/(25515.*N**2)
        # amplitude normalizing the profile via the gamma function
        amp = k**(2*N)/(2*N*gamma(2*N))
        # NOTE(review): y is negated going in and the y-deflection negated
        # coming out -- presumably a sign-convention flip required by
        # sersic.sersicdeflections; confirm against that routine
        yi,xi = sersic.sersicdeflections(-Y.ravel(),X.ravel(),amp,1.,N,Q)
        yout[:,:,i,j] = -1*yi.reshape(Y.shape)
        xout[:,:,i,j] = xi.reshape(X.shape)

# linear index splines mapping each physical axis to grid indices
axes = {}
axes[0] = interpolate.splrep(x0,numpy.arange(x0.size))
axes[1] = interpolate.splrep(x0,numpy.arange(x0.size))
axes[2] = interpolate.splrep(n0,numpy.arange(n0.size))
axes[3] = interpolate.splrep(q0,numpy.arange(q0.size))

xmodel = ndinterp.ndInterp(axes,xout)
ymodel = ndinterp.ndInterp(axes,yout)

# persist both deflection models (binary mode, pickle protocol 2)
f = open('serModelsHDR.dat','wb')
cPickle.dump([xmodel,ymodel],f,2)
f.close()
    lmstar_grid = grid_file['lmstar_grid'].value.copy()
    nmstar = len(lmstar_grid)

    lm200_grid = grid_file['lm200_grid'].value.copy()
    nm200 = len(lm200_grid)

    lc200_grid = grid_file['lc200_grid'].value.copy()
    nc200 = len(lc200_grid)

    axes = {
        0: splrep(lm200_grid, np.arange(nm200)),
        1: splrep(lmstar_grid, np.arange(nmstar)),
        2: splrep(lc200_grid, np.arange(nc200))
    }

    tein_grid = ndinterp.ndInterp(axes, grid_file['tein_grid'].value, order=1)
    crosssect_grid = ndinterp.ndInterp(axes,
                                       grid_file['crosssect_grid'].value,
                                       order=1)

    tein_grids.append(tein_grid)
    crosssect_grids.append(crosssect_grid)

    lm200_grids.append(lm200_grid)
    lc200_grids.append(lc200_grid)
    lmstar_grids.append(lmstar_grid)

    grid_file.close()

# reads in the file with the cross-section calculated over an important sample with interim prior
nimpsamp = 10
Exemple #12
0
    def fastMCMC(self, niter, nburn, nthin=1):
        """Sample the stellar-population parameters with pymc.

        The (linear) mass normalization is marginalized analytically at
        each step inside the mass_and_logp deterministic.  Several
        optimizer/sampler stages refine the proposal covariance before
        the production chain of length niter is run.

        Returns a dict with 'logP', 'logL', 'logmass' chains plus one
        chain per parameter in self.names.

        (An unreachable block of debugging code that followed the return
        statement in the original has been removed.)
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc, numpy, time
        import ndinterp

        models = self.model.models
        data = self.data
        # materialize so the filter list is indexable on Python 3 too
        filters = list(data.keys())

        pars = [self.priors[key] for key in self.names]

        # map axis name -> axis index in the model grid
        ax = {}
        doExp = []
        cube2par = []
        i = 0
        for key in self.model.axes_names:
            ax[key] = i
            i += 1
        # parameters sampled in log space are flagged for exponentiation
        i = 0
        for key in self.names:
            if key[0] == 'X':
                continue
            if key.find('log') == 0:
                pntkey = key.split('log')[1]
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp) == True
        par2cube = numpy.argsort(cube2par)

        # per-filter model mags (M), data mags (D), variances (V)
        M = numpy.empty(len(filters))
        D = numpy.empty(len(filters))
        V = numpy.empty(len(filters))
        for i in range(D.size):
            f = filters[i]
            D[i] = data[f]['mag']
            V[i] = data[f]['sigma']**2

        @pymc.deterministic
        def mass_and_logp(value=0., pars=pars):
            # evaluate the model magnitudes at the current parameter
            # point, then solve for the best-fit mass scaling m
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            for i in range(M.size):
                M[i] = models[filters[i]].eval(p)
                if M[i] == 0:
                    # outside the interpolation grid
                    return [-1., -1e300]
            m = ((M - D) / V).sum() / (2.5 / V).sum()
            logp = -0.5 * ((M - 2.5 * m - D)**2 / V).sum()
            return [m, logp]

        @pymc.observed
        def loglikelihood(value=0., lp=mass_and_logp):
            return lp[1]

        # initial proposal widths, one entry per sampled parameter
        cov = []
        for key in self.names:
            if key == 'age':
                cov.append(0.5)
            elif key == 'logage':
                cov.append(0.03)
            elif key == 'tau':
                cov.append(0.1)
            elif key == 'logtau':
                cov.append(0.03)
            elif key == 'tau_V':
                cov.append(self.priors[key]['prior'].value / 20.)
            elif key == 'logtau_V':
                cov.append(0.1)
            elif key == 'Z':
                cov.append(self.priors[key]['prior'].value / 20.)
            elif key == 'logZ':
                cov.append(0.03)
            elif key == 'redshift':
                # width inferred from the shape of the redshift prior
                P = self.priors['redshift']
                if type(P) == type(pymc.Normal('t', 0., 1)):
                    cov.append(P.parents['tau']**-0.5)
                elif type(P) == type(pymc.Uniform('t', 0., 1.)):
                    cov.append((P.parents['upper'] - P.parents['lower']) / 10.)
                else:
                    cov.append(P.parents['cov'])
        cov = numpy.array(cov)

        costs = self.constraints + [loglikelihood]
        from SampleOpt import Sampler, AMAOpt
        # burn-in: alternating optimizer/sampler stages, re-estimating
        # the proposal covariance from the accumulated trace
        # ('//' keeps the chain lengths integral on Python 3)
        S = AMAOpt(pars, costs, [mass_and_logp], cov=cov)
        S.sample(nburn // 4)

        S = Sampler(pars, costs, [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn // 4)

        S = Sampler(pars, costs, [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn // 2)

        logps, trace, dets = S.result()
        cov = numpy.cov(trace[nburn // 4:].T)

        S = AMAOpt(pars, costs, [mass_and_logp], cov=cov / 4.)
        S.sample(nburn // 2)
        logps, trace, dets = S.result()

        S = Sampler(pars, costs, [mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps, trace, dets = S.result()
        mass, logL = dets['mass_and_logp'].T
        o = {'logP': logps, 'logL': logL, 'logmass': mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o
Exemple #13
0
    def fasterMCMC(self,niter,nburn,nthin=1,grid=False):
        """Grid-based photometric fit with an optional MCMC refinement.

        Builds log-likelihood and best-mass models on the precomputed
        stellar-population grid, seeds the parameters and proposal widths
        from grid-weighted moments, then samples with SimpleSample.  With
        grid=True the grid-weighted mass mean and dispersion are returned
        directly, without sampling.

        (Two dead 'for key in self.names: continue' loops, whose bodies
        were unreachable, have been removed from the original.)
        """
        from Sampler import SimpleSample as sample
        import pymc,numpy,ndinterp

        axn = [a for a in self.model.axes_names]
        axes = self.model.axes

        # accumulate the photometric chi^2 terms over the data points;
        # lpModel/magModel/magUC express logp as a quadratic in the mass
        pmags = []
        lpModel = None
        magModel = None
        Modaxes = None
        magUC = 0.
        order = None
        for i in range(len(self.data)):
            f = self.data[i]['filter']
            z = self.data[i]['redshift']
            pmags.append(self.model.models[f][z].z)
            if magModel is None:
                magModel = self.model.models[f][z].z*0.
                lpModel = magModel.copy()
                Modaxes = self.model.models[f][z].axes
                order = self.model.models[f][z].order
            m,me = self.data[i]['mag'],self.data[i]['error']**2
            d = (m-pmags[-1])
            lpModel += -0.5*d**2/me
            magModel += d/me
            magUC += -0.5/me

        # mass-independent color chi^2 between consecutive bands,
        # used only to locate a reasonable starting grid point
        for i in range(len(self.data)-1):
            d1 = self.data[i]
            d2 = self.data[i+1]
            f1,m1,me1 = d1['filter'],d1['mag'],d1['error']
            f2,m2,me2 = d2['filter'],d2['mag'],d2['error']
            c = pmags[i]-pmags[i+1]
            dc = m1-m2
            if i==0:
                logp = (c-dc)**2/(me1**2+me2**2)
            else:
                logp += (c-dc)**2/(me1**2+me2**2)

        # best grid point and its error-weighted mass estimate
        indx = numpy.unravel_index(logp.argmin(),logp.shape)
        m = 0.
        w = 0.
        for i in range(len(self.data)):
            d,e = self.data[i]['mag'],self.data[i]['error']
            M = (pmags[i][indx]-d)/2.5
            m += M/e**2
            w += 1./e**2
        m /= w

        # mass grid around the estimate, and the prior evaluated on
        # every point of each parameter axis
        M = numpy.linspace(m-0.6,m+0.6,13)
        a = []
        for i in range(len(self.model.axes_names)):
            a.append(self.model.axes[self.model.axes_names[i]]['points'].copy())
        for key in self.names:
            p = self.priors[key]['prior']
            if key.find('mass')>=0:
                continue
            if key.find('log')==0:
                key = key[3:]
                a[axn.index(key)] = numpy.log10(a[axn.index(key)])
            i = axn.index(key)
            for j in range(len(a[i])):
                p.value = a[i][j]
                try:
                    a[i][j] = p.logp
                except Exception:
                    # zero-probability point under this prior
                    a[i][j] = -1e300
        logp = lpModel+ndinterp.create_axes_array(a).sum(0)

        # append the mass dimension to the posterior grid
        # NOTE(review): the indexing below assumes four model axes
        logp = numpy.expand_dims(logp,logp.ndim).repeat(M.size,logp.ndim)
        for i in range(M.size):
            M0 = M[i]*-2.5
            logp[:,:,:,:,i] += magModel*M0 +  M0**2*magUC
        logp -= logp.max()

        # normalized posterior weights on the (axes, mass) grid
        wt = numpy.exp(logp)
        wt /= wt.sum()
        a = []
        for i in range(len(self.model.axes_names)):
            a.append(self.model.axes[self.model.axes_names[i]]['points'])
        a.append(M)
        for key in self.names:
            if key.find('mass')>=0:
                if key.find('log')!=0:
                    # was 'a[-1] == 10**a[-1]' -- a no-op comparison; a
                    # linear-mass parameter needs the grid in linear units
                    a[-1] = 10**a[-1]
                axn.append('mass')
            elif key.find('log')==0:
                key = key[3:]
                a[axn.index(key)] = numpy.log10(a[axn.index(key)])
        vals = ndinterp.create_axes_array(a)
        if grid==True:
            m = (wt*vals[-1]).sum()
            st = ((wt*(vals[-1]-m)**2).sum())**0.5
            return m,st

        # initialize each parameter at its grid-weighted mean, with the
        # weighted dispersion as the proposal width
        pars = []
        cov = []
        for key in self.names:
            pars.append(self.priors[key]['prior'])
            if key.find('log')==0:
                key = key[3:]
            i = axn.index(key)
            m = (wt*vals[i]).sum()
            st = ((wt*(vals[i]-m)**2).sum())**0.5
            pars[-1].value = m
            cov.append(st)

        lpModel = ndinterp.ndInterp(Modaxes,lpModel,order)
        magModel = ndinterp.ndInterp(Modaxes,magModel,order)

        cov = numpy.array(cov)
        @pymc.observed
        def loglikelihood(value=0.,pars=pars):
            from math import log10
            points = numpy.zeros((1,len(pars)-1))
            i = 0
            for key in self.names:
                if key=='mass':
                    M = -2.5*log10(pars[i])
                elif key=='logmass':
                    M = -2.5*pars[i]
                elif key.find('log')==0:
                    key = key.split('log')[1]
                    points[0,axn.index(key)] = 10**pars[i]
                else:
                    points[0,axn.index(key)] = pars[i]
                i += 1
            lp = lpModel.eval(points)
            if lp==0:
                return -1e300  # Short circuit if out of range
            lp += magModel.eval(points)*M + M**2*magUC
            return lp

        # two adaptation runs, then the production chain
        # ('//' keeps the chain lengths integral on Python 3)
        costs = [loglikelihood]
        logps,trace,dets = sample(pars,costs,[],nburn//2,cov=cov,jump=[0.,0.])
        logps,trace,dets = sample(pars,costs,[],nburn//2,cov=cov,jump=[0.,0.])
        cov = numpy.cov(trace.T)
        logps,trace,dets = sample(pars,costs,[],niter,cov=cov,jump=[0.,0.])
        self.proposal_cov = cov
        self.trace= trace
        self.logp = logps
        cnt = 0
        o = {'logp':logps}
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
Exemple #14
0
    def fastMCMC(self,niter,nburn,nthin=1):
        """Sample the population parameters against precomputed magnitude grids.

        The best-fit mass scaling and the resulting log-likelihood are
        tabulated on the model grid, wrapped in linear ndInterp models,
        and the parameters are then sampled with pymc-based samplers.
        Returns a dict with 'logP', 'logL', 'logmass' chains plus one
        chain per parameter in self.names.

        (Dead code that followed the return statement, a large dead
        triple-quoted block, and unused locals have been removed from
        the original.)
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc,numpy,time
        import ndinterp

        models = self.model.models
        data = self.data
        # materialize so the filter list is indexable on Python 3 too
        filters = list(data.keys())
        t = time.time()
        # analytic best-fit mass scaling M at every grid point, then the
        # corresponding log-likelihood grid
        T1 = models[filters[0]]*0.
        T2 = 0.
        for f in filters:
            T1 += (models[f]-data[f]['mag'])/data[f]['sigma']**2
            T2 += 2.5/self.data[f]['sigma']**2
        M = T1/T2
        logp = 0.
        for f in filters:
            logp += -0.5*(-2.5*M+models[f]-data[f]['mag'])**2/data[f]['sigma']**2
        t = time.time()
        # linear index splines for each model axis
        axes = {}
        i = 0
        ax = {}
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a,numpy.arange(a.size),k=1,s=0)
            ax[key] = i
            i += 1

        print(logp.max())
        logpmodel = ndinterp.ndInterp(axes,logp,order=1)
        massmodel = ndinterp.ndInterp(axes,M,order=1)

        pars = [self.priors[key] for key in self.names]

        # parameters sampled in log space are flagged for exponentiation
        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log')==0:
                pntkey = key.split('log')[1]
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp)==True
        par2cube = numpy.argsort(cube2par)

        # grid-weighted means and covariance for the initial proposal
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            axarr.append(numpy.rollaxis(a+p0*0,p.ndim-1,i))
            i += 1
        cov = numpy.empty((p.ndim,p.ndim))
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i,p.ndim):
                l = cube2par[j]
                cov[i,j] = (p*(axarr[k]-wmean[k])*(axarr[l]-wmean[l])).sum()
                cov[j,i] = cov[i,j]
        # unbiased weighted-covariance normalization
        cov /= 1.-(p**2).sum()

        @pymc.deterministic
        def mass_and_logp(value=0.,pars=pars):
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass==0.:
                # outside the interpolation grid
                return [0.,-1e200]
            logp = logpmodel.eval(p)
            return [mass,logp]

        @pymc.observed
        def loglikelihood(value=0.,lp=mass_and_logp):
            return lp[1]

        # optimizer stage, then two sampler stages with a re-estimated
        # covariance ('//' keeps the chain lengths integral on Python 3)
        from SampleOpt import Sampler,AMAOpt
        S = AMAOpt(pars,[loglikelihood],[mass_and_logp],cov=cov)
        S.sample(nburn)
        logps,trace,dets = S.result()
        print(logps.max())

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(nburn//2)

        logps,trace,dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars,[loglikelihood],[mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps,trace,dets = S.result()
        mass,logL = dets['mass_and_logp'][:,:,0].T
        o = {'logP':logps,'logL':logL,'logmass':mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:,cnt].copy()
            cnt += 1
        return o
saved = False

# tabulate the evolved velocity-dispersion-relation coefficients on the
# (v200sigma_rat, vorbv200_rat) grid
for i in range(nv200):
    for j in range(nvorb):
        print('%d %d' % (i, j))

        pop.evolve(z_low=0., imf_recipe='mstar-vdisp', imf_coeff=imf_coeff, vdisp_coeff=vdisp_coeff, ximin=ximin, v200sigma_rat2=v200sigma_rat_grid[i], vorbv200_rat=vorbv200_rat_grid[j])

        # coefficients at snapshot index 20 -- TODO confirm which epoch
        # this row corresponds to
        vdisp_b_grid[i, j] = pop.vdisp_coeff[20, 0]
        vdisp_a_grid[i, j] = pop.vdisp_coeff[20, 1]

        # snapshot the first evolved population for later inspection
        if not saved:
            with open('popevol_for_grid_1000.dat', 'wb') as f:
                pickle.dump(pop, f)
            saved = True

b_ndinterp = ndinterp.ndInterp(axes, vdisp_b_grid, order=1)
a_ndinterp = ndinterp.ndInterp(axes, vdisp_a_grid, order=1)

# pickle needs binary streams; text-mode 'w' breaks under Python 3
with open('vdisp_grid_b2.dat', 'wb') as f:
    pickle.dump(b_ndinterp, f)

with open('vdisp_grid_a2.dat', 'wb') as f:
    pickle.dump(a_ndinterp, f)

Exemple #16
0
    grid_file.close()

else:
    grid_file = h5py.File(gridname, 'r')

    R_grid = grid_file['R_grid'][()]
    beta_grid = grid_file['beta_grid'][()]
    R, B = np.meshgrid(R_grid, beta_grid)

    Sigma_grid = grid_file['Sigma_grid'][()]
    M2d_grid = grid_file['M2d_grid'][()]
    M3d_grid = grid_file['M3d_grid'][()]

    grid_file.close()

# interpolators over the (beta, R) axes; the tabulated quantities are
# multiplied by powers of R to flatten them before spline fitting
Sigma_interp = ndinterp.ndInterp(axes, Sigma_grid * R, order=3)
M2d_interp = ndinterp.ndInterp(axes, M2d_grid * R**(B - 3.), order=3)
M3d_interp = ndinterp.ndInterp(axes, M3d_grid * R**(B - 3.), order=3)


def fast_M2d(R, rs, beta):
    """Enclosed projected mass via the precomputed (beta, R/rs) interpolator.

    The tabulated grid stores M2d rescaled by (R/rs)**(beta-3), so the
    interpolated value is multiplied by (R/rs)**(3-beta) to recover the
    physical mass.  Scalar or array arguments are accepted; they are
    broadcast against one another.
    """
    r_arr = np.atleast_1d(R)
    rs_arr = np.atleast_1d(rs)
    beta_arr = np.atleast_1d(beta)

    npts = max(len(r_arr), len(rs_arr), len(beta_arr))
    x = r_arr / rs_arr

    # evaluation points are rows of (beta, R/rs)
    pts = np.column_stack((beta_arr * np.ones(npts), x * np.ones(npts)))

    return M2d_interp.eval(pts) * x**(3. - beta_arr)

Exemple #17
0
    def create_models(self):
        """Build per-filter AB-magnitude grids over the model axes.

        Loads pickled SED templates from self.files, computes the AB
        magnitude of each template in every filter at every tabulated
        redshift, and stores the values in one numpy grid per filter.
        Returns the dict of raw grids and records the interpolation axes
        on self.interpAxes.

        NOTE(review): Python 2 code (print statements, cPickle).
        """
        import numpy, cPickle
        from stellarpop import tools
        from ndinterp import ndInterp

        # index[key] maps an axis value to its position along that axis;
        # axes maps axis number -> that axis' evaluation spline.
        index = {}
        shape = []
        axes = {}
        axes_index = 0
        for key in self.axes_names:
            index[key] = {}
            shape.append(self.axes[key]['points'].size)
            axes[axes_index] = self.axes[key]['eval']
            axes_index += 1
            for i in range(self.axes[key]['points'].size):
                index[key][self.axes[key]['points'][i]] = i
        self.redshifts = self.axes['redshift']['points']
        self.corrections = self.luminosity_correction()

        zindex = self.axes_names.index('redshift')

        # one NaN-filled grid per filter; NaN marks never-filled cells
        models = {}
        model = numpy.empty(shape) * numpy.nan
        for f in self.filter_names:
            models[f] = model.copy()

        for file in self.files:
            f = open(file, 'rb')
            data = cPickle.load(f)
            wave = cPickle.load(f)
            f.close()
            for key in data.keys():
                obj = data[key]
                jj = key
                spec = obj['sed']
                # build the (fancy) grid index of this template from its
                # axis values, leaving a slot for redshift
                ind = []
                for key in self.axes_names:
                    if key == 'redshift':
                        continue
                    try:
                        ind.append([index[key][obj[key]]])
                    except:
                        # NOTE(review): `df` is an undefined name -- this
                        # deliberately raises NameError to halt on bad input
                        print key, index[key]
                        print obj
                        df
                ind.insert(zindex, None)
                for f in self.filter_names:
                    for i in range(self.redshifts.size):
                        z = self.redshifts[i]
                        # correction is the units correction factor
                        correction = self.corrections[i]
                        sed = [wave, spec * correction]
                        mag = tools.ABFilterMagnitude(self.filters[f], sed, z)
                        if numpy.isnan(mag) == True:
                            df
                        ind[zindex] = i
                        models[f][ind] = mag
        self.interpAxes = axes
        return models
        # NOTE(review): everything below is unreachable (short-circuited by
        # the return above); it would wrap each complete grid in an ndInterp.
        for f in self.filter_names:
            model = models[f].copy()
            if numpy.isnan(model).any():
                models[f] = None
            else:
                models[f] = ndInterp(axes, model, order=1)
        return models
# Tabulate sigma^2 (via sigma_model.sigma2general) on a (gamma, beta, reff)
# grid for a broken-alpha density profile, then pickle both the raw grid and
# a linear interpolator over it.
# NOTE(review): Python 2 script; pickle files are opened in text mode ('w').
s2_grid = np.zeros((ng, nb, nreff))

for i in range(ng):
    print i

    for j in range(nb):
        print j
        rhos = rho(r_grid, gamma_grid[i], beta_grid[j])

        # mass integrand dM/dr = 4*pi*rho*r^2, with an explicit zero at r=0
        # so the spline integral below can start from the origin
        rs0 = np.array([0.] + list(r_grid))
        mp0 = np.array([0.] + list(4.*np.pi*rhos*r_grid**2))

        mprime_spline = splrep(rs0, mp0)

        # cumulative 3D mass profile M(<r) by spline quadrature
        m3d_grid = 0.*r3d_grid
        for k in range(nr3d):
            m3d_grid[k] = splint(0., r3d_grid[k], mprime_spline)

        # aperture of half the effective radius, no seeing convolution
        for k in range(nreff):
            s2_grid[i, j, k] = sigma_model.sigma2general((r3d_grid, m3d_grid), 0.5 * reff_grid[k], lp_pars=reff_grid[k], seeing=None, light_profile=deVaucouleurs)

f = open('broken_alpha_s2_grid.dat', 'w')
pickle.dump(s2_grid, f)
f.close()

s2_ndinterp = ndinterp.ndInterp(axes, s2_grid, order=1)
f = open('broken_alpha_s2_ndinterp.dat', 'w')
pickle.dump(s2_ndinterp, f)
f.close()

# Tabulate Sersic deflection angles on a 4D (x, y, n, q) lattice and pickle
# two cubic interpolators (x- and y-deflection) for later lens modelling.
# Axis samples for Sersic index n and axis ratio q; x0 is defined earlier.
n0 = numpy.linspace(0.5, 6., 12)
q0 = numpy.linspace(0.1, 1., 19)

x, y, n, q = ndinterp.create_axes_array([x0, x0, n0, q0])
yout = x * 0.
xout = y * 0.
for i in range(x.shape[2]):
    for j in range(x.shape[3]):
        X = x[:, :, i, j]
        Y = y[:, :, i, j]
        N = n[0, 0, i, j]
        Q = q[0, 0, i, j]
        # asymptotic expansion of the Sersic b_n parameter
        # (b_n ~ 2n - 1/3 + 4/405n + 46/25515n^2)
        k = 2. * N - 1. / 3 + 4. / (405. * N) + 46 / (25515. * N**2)
        amp = k**(2 * N) / (2 * N * gamma(2 * N))
        # deflections; the y coordinate is sign-flipped on input and output
        yi, xi = sersic.sersicdeflections(-Y.ravel(), X.ravel(), amp, 1., N, Q)
        yout[:, :, i, j] = -1 * yi.reshape(Y.shape)
        xout[:, :, i, j] = xi.reshape(X.shape)

# splines mapping physical coordinate -> fractional grid index, one per axis
axes = {}
axes[0] = interpolate.splrep(x0, numpy.arange(x0.size))
axes[1] = interpolate.splrep(x0, numpy.arange(x0.size))
axes[2] = interpolate.splrep(n0, numpy.arange(n0.size))
axes[3] = interpolate.splrep(q0, numpy.arange(q0.size))

xmodel = ndinterp.ndInterp(axes, xout)
ymodel = ndinterp.ndInterp(axes, yout)

f = open('serModelsHDR.dat', 'wb')
cPickle.dump([xmodel, ymodel], f, 2)
f.close()
Exemple #20
0
def main():
    """Assemble a 4D velocity-dispersion grid over (gamma, r_ani, beta_in,
    beta_out) from per-job output files and pickle two interpolators over it.

    NOTE(review): Python 2 code throughout -- print statements and, crucially,
    integer division in the index arithmetic below.  Relies on a star import
    providing numpy-style names (zeros, linspace, sort, insert, ...).
    """
    theta_E = 1.25
    # check if all the output files are created #
    str = []  # NOTE(review): shadows the builtin `str`
    nl = 0
    for line in open('sig_files/list', 'r'):
        str.append(line)
        nl += 1
    print nl
    # strip the fixed-width path prefix/suffix, leaving the numeric file index
    for j in range(nl):
        string_ = str[j]
        str[j] = string_[41:-5]
    print len(str)
    num = map(int, str)
    num = sort(num)
    print num
    # finding the missing file index #
    nmiss = 0
    missings = []
    for i in range(nl):
        if i != num[i]:
            print i
            missings.append(i)
            # pad `num` so subsequent positions line up again
            num = insert(num, i, i)
            nmiss += 1
    f = open('missing_idx', 'w')
    for i in range(nmiss):
        f.write('%s\n' % (missings[i]))
    f.close()
    print nmiss
    print nmiss + nl
    #	return
    # grid dimensions: slope gamma, anisotropy radius, inner/outer anisotropy
    ngam = 121
    nbout = 13
    nbin = 13
    nrani = 121
    re = 1.85
    gamma_arr = linspace(1.01, 2.99, ngam)
    rani_arr = logspace(log10(0.5 * re), log10(5. * re), nrani)
    bin_arr = linspace(-0.6, 0.6, nbin)
    bout_arr = linspace(-0.6, 0.6, nbout)
    np = 32  # models per vdisp file; NOTE(review): shadows the usual numpy alias
    nsamp = 13 * 13  # samples per parameter file (nbin * nbout)
    data_grid = zeros((ngam, nrani, nbin, nbout), 'float64')
    asec_to_rad = 4.84e-6
    # i is the index of vdisp file #
    for i in range(77323):
        nline = 0
        if i % 1000 == 0:
            print '...reading %d th file...' % i
        vdisp_file = 'sig_files/log_grid_spl_idx_fft_vdisp_out_fine_full_%d.txt' % (
            i)
        # first pass only counts lines so the array can be pre-sized
        for line in open(vdisp_file, 'r'):
            nline += 1
        #		if i%100 ==0:
        #			print '...number of lines : %d...'%nline
        vdisp_data = zeros((nline, 3), 'float64')
        count = 0
        for line in open(vdisp_file, 'r'):
            if (line.find('#') == -1):
                vdisp_data[count] = line.split()
                count += 1
        gal_idx = vdisp_data[:, 0]
        vdisp_norm = vdisp_data[:, 1]
        vdisp_raw = vdisp_data[:, 2]
        # global model-index range covered by this file, clamped to the grid
        idxs = i * np
        idxf = (i + 1) * np - 1
        if idxf > ngam * nrani * nbin * nbout:
            idxf = ngam * nrani * nbin * nbout - 1
        # nfiles is the index of param file #
        # NOTE(review): Python 2 integer division is essential in all of these
        nfiles = idxs / nsamp
        nfilef = idxf / nsamp
        iis = nfiles / ngam
        jjs = nfiles % ngam
        iif = nfilef / ngam
        jjf = nfilef % ngam
        idx_offset = idxs - nfiles * nsamp
        file = 'param/grid_gam%03drani%03d.dat' % (iis, jjs)
        data2 = zeros((nsamp, 8), 'float64')
        data = zeros((nsamp, 8), 'float64')
        count = 0
        for line in open(file, 'r'):
            if (line.find('#') == -1):
                data[count] = line.split()
                count += 1
        file2 = 'param/grid_gam%03drani%03d.dat' % (iif, jjf)
        data2 = zeros((nsamp, 8), 'float64')
        count = 0
        for line in open(file2, 'r'):
            if (line.find('#') == -1):
                data2[count] = line.split()
                count += 1
        # if this vdisp file straddles two parameter files, stack them
        if jjs != jjf:
            data = concatenate((data, data2), axis=0)
        Dd = data[:, 0] * 1000000.
        gamma = data[:, 1]
        kext = data[:, 2]
        theta_E = data[:, 3]
        mE = data[:, 4]
        mass = mE * (1. - kext)
        if iis == 0 and jjs == 0:
            print mE
        from scipy.special import gamma as gfunc
        # power-law density normalisation for the given (external-convergence
        # corrected) Einstein mass
        rho = -mass * theta_E**(gamma - 3) * gfunc(
            gamma / 2.) * pi**(-1.5) / gfunc(0.5 * (gamma - 3))
        norm = 4. * pi * rho / (3. - gamma)

        #	        data = zeros((nsamp,8),'float64')
        #	        count = 0
        #	        for line in open(file,'r'):
        #      	        	if(line.find('#')==-1):
        #                      	data[count] = line.split()
        #                     	count += 1
        # decode the flat galaxy index into the 4 grid coordinates
        gal_idx = gal_idx.astype(int)
        gamma_idx = gal_idx / (nbout * nbin * nrani)
        rani_idx = (gal_idx / (nbout * nbin)) % nrani
        bin_idx = (gal_idx / nbout) % nbin
        bout_idx = gal_idx % nbout
        # data grid is guaranteed to be positive #
        # check what's happening with interpolation #
        for m in range(nline):
            #data_grid[gamma_idx[m],rani_idx[m],bin_idx[m],bout_idx[m]] = (vdisp_raw[m])**2./norm[idx_offset+m]/(asec_to_rad*Dd[idx_offset+m])
            data_grid[gamma_idx[m], rani_idx[m], bin_idx[m],
                      bout_idx[m]] = (vdisp_norm[m])**2. / norm[
                          idx_offset + m] * (Dd[idx_offset + m])
    print 'data grid created'
    import ndinterp
    from scipy import interpolate
    # cubic splines mapping physical axis values to grid indices
    axes = {}
    axes[0] = interpolate.splrep(gamma_arr, arange(ngam), k=3, s=0)
    axes[1] = interpolate.splrep(rani_arr, arange(nrani), k=3, s=0)
    axes[2] = interpolate.splrep(bin_arr, arange(nbin), k=3, s=0)
    axes[3] = interpolate.splrep(bout_arr, arange(nbout), k=3, s=0)
    model = ndinterp.ndInterp(axes, data_grid)

    filesigv = 'filesigv_adriparam'
    import cPickle
    f = open(filesigv, 'wb')
    cPickle.dump(model, f, 2)
    f.close()
    print '1 grid interpolated and saved'

    # nearest-neighbour interpolator on the same grid, saved separately
    model2 = interpolate.RegularGridInterpolator(
        (gamma_arr, rani_arr, bin_arr, bout_arr), data_grid, method='nearest')

    filesigv = 'filesigv_adriparam_reg_grid_norm_1115'
    import cPickle
    f = open(filesigv, 'wb')
    cPickle.dump(model2, f, 2)
    f.close()
    print '2 grid interpolated and saved'
    return
    # NOTE(review): everything below is unreachable -- old parameter-file
    # generation code kept behind the early return above.
    numpy.random.seed(10)

    c = 8.39e-10
    Grav = 6.67408e-11 * (3.086e22)**-3. * 1.989e30 / (1.157e-5)**2.

    zl = 0.6304
    zs = 1.394

    reff = 0.58
    ######## number of beta_in and beta_out : each has 13 sample points in [-0.6,0.6] ########
    nbin = 13
    nbout = 13
    ######## number of samples in each parameter file ########
    nsamp = nbin * nbout
    ######## for grid creation, pick a single value ########
    Ddmax = 2100
    Ddmin = 700
    Dd_samp = numpy.random.random(nsamp) * (Ddmax - Ddmin) + Ddmin

    DdsDsmax = 0.70
    DdsDsmin = 0.30
    DdsDs_samp = numpy.random.random(nsamp) * (DdsDsmax - DdsDsmin) + DdsDsmin

    SigCrit_samp = c**2. / (4. * pi * Grav * Dd_samp * DdsDs_samp)
    #	SigCrit = c**2./(4.*pi*Grav*Dd*DdsDs)

    thE_samp = [0.826 for i in range(nsamp)]
    #	thE = 0.826

    mE_samp = SigCrit_samp * pi * thE_samp * thE_samp * Dd_samp * Dd_samp * asec_to_rad * asec_to_rad
    #	mE = SigCrit * pi * thE * thE * Dd * Dd * asec_to_rad * asec_to_rad
    # number of desired gamma / rani for the grid #
    ngam = 121
    nrani = 121
    count = 0
    kextfile = 'kextcounts_for_Inh_16-0623.dat'
    for line in open(kextfile, 'r'):
        count += 1
    kext = numpy.zeros(count)
    kext_samp = numpy.zeros(nsamp)
    m = 0
    for line in open(kextfile, 'r'):
        kext[m] = line
        m += 1

    gam_samp = numpy.linspace(1.01, 2.99, ngam)
    rani_samp = numpy.logspace(log10(0.5 * reff), log10(5 * reff), nrani)
    bi_samp = numpy.linspace(-0.6, 0.6, nbin)
    bo_samp = numpy.linspace(-0.6, 0.6, nbout)
    # give a random seed to numpy random so that one can reproduce the same results #
    for i2 in range(nsamp):
        idx = numpy.random.randint(low=0, high=m)
        kext_samp[i2] = kext[idx]
        mass_samp = (1 - kext_samp) * mE_samp
    for i1 in range(ngam):
        gam = gam_samp[i1]
        for j1 in range(nrani):
            rani = rani_samp[j1]
            paramfile = 'param/grid_gam%03drani%03d.dat' % (i1, j1)

            f = open(paramfile, 'w')
            f.write(
                "# Dd, gamma, kext, theta_E, m_E, rani, beta_in, beta_out\n")

            #	model_Inh = numpy.load('filesigv_Inh_100CF')
            for i3 in range(nbin):
                bin = bi_samp[i3]
                for j3 in range(nbout):
                    bout = bo_samp[j3]
                    idxp = i3 * nbout + j3
                    #	sig_ref = numpy.sqrt(2.)*sm.getSigmaFromGrid2(mass_samp,thE_samp, gam_samp, rani_samp, model_Inh, Dd_samp)
                    f.write("%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\n" %
                            (Dd_samp[idxp], gam, kext_samp[idxp],
                             thE_samp[idxp], mE_samp[idxp], rani, bin, bout))
            f.close()
Exemple #21
0
    def fastMCMC(self, niter, nburn, nthin=1):
        """Sample the model posterior with the mass normalisation profiled out.

        For every grid point, the best-fit mass offset M is obtained
        analytically from the per-filter residuals; the resulting
        log-likelihood and mass grids are wrapped in linear interpolators
        and sampled with pymc-based samplers.  Returns a dict with 'logP',
        'logL', 'logmass' and one entry per sampled parameter.

        NOTE(review): Python 2 code (print statements, dict.keys() indexed
        as a list, cPickle in the dead tail).
        """
        from Sampler import SimpleSample as sample
        from scipy import interpolate
        import pymc, numpy, time
        import ndinterp

        models = self.model.models
        data = self.data
        filters = data.keys()
        t = time.time()
        # accumulate the weighted residual (T1) and weight (T2) terms whose
        # ratio is the analytic best-fit mass offset at each grid point
        T1 = models[filters[0]] * 0.
        T2 = 0.
        for f in filters:
            T1 += (models[f] - data[f]['mag']) / data[f]['sigma']**2
            T2 += 2.5 / self.data[f]['sigma']**2
        M = T1 / T2
        # log-likelihood grid evaluated at the per-point best-fit M
        logp = 0.
        for f in filters:
            logp += -0.5 * (-2.5 * M + models[f] -
                            data[f]['mag'])**2 / data[f]['sigma']**2
        t = time.time()
        # linear value->index splines per axis, and the grid maximum as the
        # starting point
        axes = {}
        i = 0
        ax = {}
        ind = numpy.unravel_index(logp.argmax(), logp.shape)
        best = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            axes[i] = interpolate.splrep(a, numpy.arange(a.size), k=1, s=0)
            ax[key] = i
            best.append(a[ind[i]])
            i += 1

        print logp.max()
        logpmodel = ndinterp.ndInterp(axes, logp, order=1)
        massmodel = ndinterp.ndInterp(axes, M, order=1)

        pars = [self.priors[key] for key in self.names]

        # map sampler parameters to grid axes; log-parameters are
        # exponentiated before grid evaluation
        doExp = []
        cube2par = []
        i = 0
        for key in self.names:
            if key.find('log') == 0:
                pntkey = key.split('log')[1]
                #self.priors[key].value = numpy.log10(best[ax[pntkey]])
                doExp.append(True)
            else:
                pntkey = key
                doExp.append(False)
                #self.priors[key].value = best[ax[pntkey]]
            cube2par.append(ax[pntkey])
        doExp = numpy.array(doExp) == True
        par2cube = numpy.argsort(cube2par)

        # weighted moments of the grid posterior give the initial proposal
        # covariance for the sampler
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        axarr = []
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p, i, p.ndim)
            wmean[i] = (a * p0).sum()
            axarr.append(numpy.rollaxis(a + p0 * 0, p.ndim - 1, i))
            i += 1
        cov = numpy.empty((p.ndim, p.ndim))
        #for i in range(p.ndim):
        #    for j in range(i,p.ndim):
        #        cov[i,j] = (p*(axarr[i]-wmean[i])*(axarr[j]-wmean[j])).sum()
        #        cov[j,i] = cov[i,j]
        for i in range(p.ndim):
            k = cube2par[i]
            for j in range(i, p.ndim):
                l = cube2par[j]
                cov[i, j] = (p * (axarr[k] - wmean[k]) *
                             (axarr[l] - wmean[l])).sum()
                cov[j, i] = cov[i, j]
        # unbiased weighted-covariance normalisation
        cov /= 1. - (p**2).sum()
        #for key in self.names:
        #    if key.find('log')==0:
        #        pntkey = key.split('log')[1]
        #        self.priors[key].value = numpy.log10(wmean[ax[pntkey]])
        #    else:
        #        self.priors[key].value = wmean[ax[key]]

        #self.priors['redshift'].value = 0.1
        pnt = numpy.empty((len(self.priors), 1))

        @pymc.deterministic
        def mass_and_logp(value=0., pars=pars):
            # evaluate the mass and log-likelihood interpolators at the
            # current parameter vector (log parameters exponentiated first)
            p = numpy.array(pars)
            p[doExp] = 10**p[doExp]
            p = numpy.atleast_2d(p[par2cube])
            mass = massmodel.eval(p)
            if mass == 0.:
                # out-of-grid points evaluate to 0 -> reject
                return [0., -1e200]
            logp = logpmodel.eval(p)
            return [mass, logp]

        @pymc.observed
        def loglikelihood(value=0., lp=mass_and_logp):
            return lp[1]

        """
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        i = 0
        wmean = numpy.empty(p.ndim)
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            p0 = numpy.rollaxis(p,i,p.ndim)
            wmean[i] = (a*p0).sum()
            i += 1

        

        cov = []
        for key in self.names:
            if key=='age':
                cov.append(0.5)
            elif key=='logage':
                cov.append(0.03)
            elif key=='tau':
                cov.append(0.1)
            elif key=='logtau':
                cov.append(0.03)
            elif key=='tau_V':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logtau_V':
                cov.append(0.1)
            elif key=='Z':
                cov.append(self.priors[key]['prior'].value/20.)
            elif key=='logZ':
                cov.append(0.03)
            elif key=='redshift':
                cov.append(0.1)
        cov = numpy.array(cov)
        """

        # optimisation burn-in, then two Metropolis stages: the first
        # estimates a covariance from the trace, the second is the real run
        from SampleOpt import Sampler, AMAOpt
        S = AMAOpt(pars, [loglikelihood], [mass_and_logp], cov=cov)
        S.sample(nburn)
        logps, trace, dets = S.result()
        print logps.max()

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(nburn / 2)

        logps, trace, dets = S.result()
        cov = numpy.cov(trace.T)

        S = Sampler(pars, [loglikelihood], [mass_and_logp])
        S.setCov(cov)
        S.sample(niter)

        logps, trace, dets = S.result()
        mass, logL = dets['mass_and_logp'][:, :, 0].T
        o = {'logP': logps, 'logL': logL, 'logmass': mass}
        cnt = 0
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o

        # NOTE(review): everything below is unreachable debug code; it also
        # contains a bug (`logp -= max` subtracts the builtin `max`), which
        # never triggers only because of the return above.
        arg = logp.argmax()
        logp -= logp.max()
        p = numpy.exp(logp)
        p /= p.sum()
        print p.max()
        i = 0
        for key in self.model.axes_names:
            a = self.model.axes[key]['points']
            if key == 'redshift':
                a = a[::5]
            p0 = numpy.rollaxis(p, i, p.ndim)
            print key, (a * p0).sum()
            i += 1

        print numpy.unravel_index(arg, logp.shape)
        logp -= max
        print(M * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        z = (M * 0. + 1) * self.model.axes['redshift']['points'][::5]
        print(z * numpy.exp(logp)).sum() / numpy.exp(logp).sum()
        f = open('check', 'wb')
        import cPickle
        cPickle.dump([M, logp], f, 2)
        f.close()
        mod = ndinterp.ndInterp(self.models.axes, logp)
Exemple #22
0
        m200_grids.append(m200_grid)

        lc200_grid = grid_file['lc200_grid'][()]
        lc200_grids.append(lc200_grid)

        axes = {
            0: splrep(mstar_grid, np.arange(len(mstar_grid))),
            1: splrep(m200_grid, np.arange(len(m200_grid))),
            2: splrep(lc200_grid, np.arange(len(lc200_grid)))
        }

        grid_here = grid_file['wl_like_grid'][()]
        grid_here -= grid_here.max()

        # PREPARES A 3D INTERPOLATOR OBJECT ON THE GRID
        grid = ndinterp.ndInterp(axes, grid_here, order=1)
        grids.append(grid)

        # PREPARES SCALE-FREE GAUSSIAN SAMPLES FOR MC INTEGRATION
        mstar_impsamp.append(np.random.normal(0., 1., nint))
        m200_impsamp.append(np.random.normal(0., 1., nint))
        lc200_impsamp.append(np.random.normal(0., 1., nint))

        grid_file.close()

        mstar_samp.append(mstar[i])
        merr_samp.append(merr[i])

# freeze the accumulated per-object lists as arrays for vectorised use
mstar_samp = np.array(mstar_samp)
merr_samp = np.array(merr_samp)
Exemple #23
0
# Build per-band magnitude interpolators over the SPS grid axes
# (Z, tau, tau_V, age).  The metallicity axis uses a linear (k=1) index
# spline; the remaining axes use cubic splines.
Z_spline = splrep(Z_grid, np.arange(len(Z_grid)), k=1, s=0)

tau_grid = mags_grid_file['tau_grid'][()]
tau_spline = splrep(tau_grid, np.arange(len(tau_grid)), k=3, s=0)

tau_V_grid = mags_grid_file['tau_V_grid'][()]
tau_V_spline = splrep(tau_V_grid, np.arange(len(tau_V_grid)), k=3, s=0)

age_grid = mags_grid_file['age_grid'][()]
age_spline = splrep(age_grid, np.arange(len(age_grid)), k=3, s=0)

axes = {0: Z_spline, 1: tau_spline, 2: tau_V_spline, 3: age_spline}

interp = {}
for band in bands:
    interp[band] = ndinterp.ndInterp(axes, mags_grid_file['%s_mag' % band][()])

# guesses stellar mass given reasonable values of other parameters
# (uniage is presumably converted from yr to Gyr here -- TODO confirm)
uniage = pygalaxev_cosmology.uniage(redshift) / 1e9

start_pnt = {}
start_pnt['tau'] = 1.
start_pnt['age'] = 0.8 * uniage
start_pnt['Z'] = 0.02
start_pnt['tau_V'] = 0.1

par_list = ['Z', 'tau', 'tau_V', 'age']
start_arr = np.empty(4)
for i in range(4):
    start_arr[i] = start_pnt[par_list[i]]
Exemple #24
0
    def create_models(self,old=None):
        """Build per-filter AB-magnitude interpolators over the model axes.

        Like create_models above, but (a) axes with a single sample point
        are dropped from the grid, (b) a fixed single redshift disables
        redshift inference, and (c) a previously computed grid dict can be
        passed as `old` to skip the expensive magnitude evaluation and only
        re-wrap the grids in ndInterp objects.

        NOTE(review): Python 2 code (print statements, cPickle).
        """
        import numpy,cPickle
        from stellarpop import tools
        from ndinterp import ndInterp

        index = {}
        shape = []
        axes = {}
        inferRedshift = True
        axes_index = 0
        # Only axes with more than one element are interesting
        for key in self.axes_names:
            if self.axes[key]['points'].size<=1:
                continue
            index[key] = {}
            shape.append(self.axes[key]['points'].size)
            axes[axes_index] = self.axes[key]['eval']
            axes_index += 1
            for i in range(self.axes[key]['points'].size):
                index[key][self.axes[key]['points'][i]] = i
        outKeys = index.keys()
        self.redshifts = self.axes['redshift']['points']
        if len(self.redshifts)==1:
            inferRedshift = False
        self.corrections = self.luminosity_correction()

        # shortcut: re-wrap precomputed grids without re-evaluating magnitudes
        if old is not None:
            models = old
            for f in self.filter_names:
                model = models[f].copy()
                if numpy.isnan(model).any():
                    models[f] = None
                else:
                    models[f] = ndInterp(axes,model,order=1)
            return models

        zindex = self.axes_names.index('redshift')

        # one NaN-filled grid per filter; NaN marks never-filled cells
        models = {}
        model = numpy.empty(shape)*numpy.nan
        for f in self.filter_names:
            models[f] = model.copy()

        for file in self.files:
            f = open(file,'rb')
            data = cPickle.load(f)
            wave = cPickle.load(f)
            f.close()
            for key in data.keys():
                obj = data[key]
                jj = key
                spec = obj['sed']
                # build the (fancy) grid index of this template from its
                # axis values, skipping singleton axes
                ind = []
                for key in self.axes_names:
                    if key=='redshift' or key not in outKeys:
                        continue
                    try:
                        ind.append([index[key][obj[key]]])
                    except:
                        # NOTE(review): `df` is an undefined name -- this
                        # deliberately raises NameError to halt on bad input
                        print key,index[key]
                        print obj
                        df
                if inferRedshift:
                    ind.insert(zindex,None)
                for f in self.filter_names:
                    for i in range(self.redshifts.size):
                        z = self.redshifts[i]
                        # correction is the units correction factor
                        correction = self.corrections[i]
                        sed = [wave,spec*correction]
                        mag = tools.ABFilterMagnitude(self.filters[f],sed,z)
                        if numpy.isnan(mag)==True:
                            df
                        if inferRedshift:
                            ind[zindex] = i
                        models[f][ind] = mag
        self.interpAxes = axes
        # wrap complete grids in interpolators; incomplete ones become None
        for f in self.filter_names:
            model = models[f].copy()
            if numpy.isnan(model).any():
                models[f] = None
            else:
                models[f] = ndInterp(axes,model,order=1)
        return models
# Tabulate psi2 and psi3 (presumably the second and third radial derivatives
# of the lensing potential at the Einstein radius -- TODO confirm against
# lens_models) over the (gamma, beta) grid, then build linear interpolators.
# NOTE(review): psi2 is allocated earlier in the file; only psi3 is here.
psi3 = np.zeros((ng, nb))

lens = lens_models.broken_alpha_powerlaw(rein=1.)

for i in range(ng):
    lens.gamma = gamma_grid[i]
    for j in range(nb):
        lens.beta = beta_grid[j]
        psi2[i, j] = lens.psi2()
        psi3[i, j] = lens.psi3()

print psi2.min(), psi2.max()
print psi3.min(), psi3.max()

psi2_ndinterp = ndinterp.ndInterp(axes, psi2, order=1)
psi3_ndinterp = ndinterp.ndInterp(axes, psi3, order=1)

# optimizer setup: rescale the (gamma, beta) box to the unit square so the
# L-BFGS-B bounds are scale-free
start = np.array((2., 0.))
bounds = np.array(((gamma_min, gamma_max), (beta_min, beta_max)))
scale_free_bounds = 0.*bounds
scale_free_bounds[:, 1] = 1.

scale_free_guess = (start - bounds[:, 0])/(bounds[:, 1] - bounds[:, 0])

eps = 1e-5

minimizer_kwargs = dict(method="L-BFGS-B", bounds=scale_free_bounds, tol=eps)

gammas_grid = np.linspace(1.4, 2.6, 11)
betas_grid = np.linspace(-0.8, 0.8, 101)
Exemple #26
0
    def fasterMCMC(self, niter, nburn, nthin=1, grid=False):
        """Sample the photometric posterior with the magnitude offset profiled.

        Per-filter residual grids are combined into a single log-likelihood
        grid analytically marginalised over a discretised mass/magnitude
        offset axis M.  Weighted moments of that grid seed the starting
        values and proposal widths, then three SimpleSample stages produce
        the final trace.  If `grid` is True, the weighted mean and standard
        deviation of the mass axis are returned directly without sampling.

        Returns a dict with 'logp' and one entry per parameter in
        self.names (or the (mean, std) pair when grid=True).

        Fixes applied in review:
        - the linear-'mass' axis conversion was a no-op comparison
          (`a[-1] == 10**a[-1]`); it is now the intended assignment.
        - two dead `for key in self.names: continue ...` loops (whose bodies
          were unreachable) have been removed.
        """
        from Sampler import SimpleSample as sample
        import pymc, numpy, ndinterp

        axn = [a for a in self.model.axes_names]
        axes = self.model.axes

        # Accumulate, over all observed filters:
        #   lpModel  -- the chi^2 part of the log-likelihood grid at M=0
        #   magModel -- the linear-in-M residual term
        #   magUC    -- the quadratic-in-M coefficient
        # so that logL(grid, M) = lpModel + magModel*M0 + magUC*M0**2.
        pmags = []
        lpModel = None
        magModel = None
        Modaxes = None
        magUC = 0.
        order = None
        for i in range(len(self.data)):
            f = self.data[i]['filter']
            z = self.data[i]['redshift']
            pmags.append(self.model.models[f][z].z)
            if magModel is None:
                magModel = self.model.models[f][z].z * 0.
                lpModel = magModel.copy()
                Modaxes = self.model.models[f][z].axes
                order = self.model.models[f][z].order
            m, me = self.data[i]['mag'], self.data[i]['error']**2
            d = (m - pmags[-1])
            lpModel += -0.5 * d**2 / me
            magModel += d / me
            magUC += -0.5 / me

        # colour chi^2 between adjacent filters; its minimum locates a
        # reasonable starting grid point independent of the overall offset
        for i in range(len(self.data) - 1):
            d1 = self.data[i]
            d2 = self.data[i + 1]
            f1, m1, me1 = d1['filter'], d1['mag'], d1['error']
            f2, m2, me2 = d2['filter'], d2['mag'], d2['error']
            c = pmags[i] - pmags[i + 1]
            dc = m1 - m2
            if i == 0:
                logp = (c - dc)**2 / (me1**2 + me2**2)
            else:
                logp += (c - dc)**2 / (me1**2 + me2**2)

        indx = numpy.unravel_index(logp.argmin(), logp.shape)
        # inverse-variance weighted estimate of the log mass offset at the
        # best colour point
        m = 0.
        w = 0.
        for i in range(len(self.data)):
            d, e = self.data[i]['mag'], self.data[i]['error']
            M = (pmags[i][indx] - d) / 2.5
            m += M / e**2
            w += 1. / e**2
        m /= w

        # discretise the mass offset around the estimate and fold each
        # parameter's prior into the grid log-likelihood
        M = numpy.linspace(m - 0.6, m + 0.6, 13)
        a = []
        for i in range(len(self.model.axes_names)):
            a.append(
                self.model.axes[self.model.axes_names[i]]['points'].copy())
        for key in self.names:
            p = self.priors[key]['prior']
            if key.find('mass') >= 0:
                continue
            if key.find('log') == 0:
                key = key[3:]
                a[axn.index(key)] = numpy.log10(a[axn.index(key)])
            i = axn.index(key)
            for j in range(len(a[i])):
                p.value = a[i][j]
                try:
                    a[i][j] = p.logp
                except:
                    # prior undefined at this grid point -> effectively -inf
                    a[i][j] = -1e300
        logp = lpModel + ndinterp.create_axes_array(a).sum(0)

        # extend the grid with the M axis and complete the quadratic in M
        # (assumes four model axes, matching the 5D indexing below)
        logp = numpy.expand_dims(logp, logp.ndim).repeat(M.size, logp.ndim)
        for i in range(M.size):
            M0 = M[i] * -2.5
            logp[:, :, :, :, i] += magModel * M0 + M0**2 * magUC
        logp -= logp.max()

        # posterior weights over the full (model axes + M) grid
        wt = numpy.exp(logp)
        wt /= wt.sum()
        a = []
        for i in range(len(self.model.axes_names)):
            a.append(self.model.axes[self.model.axes_names[i]]['points'])
        a.append(M)
        for key in self.names:
            if key.find('mass') >= 0:
                if key.find('log') != 0:
                    # linear 'mass' parameter: convert the log-mass axis
                    # (was a no-op `==` comparison before review)
                    a[-1] = 10**a[-1]
                axn.append('mass')
            elif key.find('log') == 0:
                key = key[3:]
                a[axn.index(key)] = numpy.log10(a[axn.index(key)])
        vals = ndinterp.create_axes_array(a)
        if grid == True:
            # grid mode: return the weighted mean and std of the mass axis
            m = (wt * vals[-1]).sum()
            st = ((wt * (vals[-1] - m)**2).sum())**0.5
            return m, st

        # initialise each parameter at its weighted mean, with the weighted
        # standard deviation as the proposal width
        pars = []
        cov = []

        for key in self.names:
            pars.append(self.priors[key]['prior'])
            if key.find('log') == 0:
                key = key[3:]
            i = axn.index(key)
            m = (wt * vals[i]).sum()
            st = ((wt * (vals[i] - m)**2).sum())**0.5
            pars[-1].value = m
            cov.append(st)

        # interpolators used inside the likelihood at sample time
        lpModel = ndinterp.ndInterp(Modaxes, lpModel, order)
        magModel = ndinterp.ndInterp(Modaxes, magModel, order)

        cov = numpy.array(cov)

        @pymc.observed
        def loglikelihood(value=0., pars=pars):
            from math import log10
            # map the parameter vector onto grid coordinates, pulling the
            # mass term out as the magnitude offset M
            points = numpy.zeros((1, len(pars) - 1))
            i = 0
            for key in self.names:
                if key == 'mass':
                    M = -2.5 * log10(pars[i])
                elif key == 'logmass':
                    M = -2.5 * pars[i]
                elif key.find('log') == 0:
                    key = key.split('log')[1]
                    points[0, axn.index(key)] = 10**pars[i]
                else:
                    points[0, axn.index(key)] = pars[i]
                i += 1
            lp = lpModel.eval(points)
            if lp == 0:
                return -1e300  # Short circuit if out of range
            lp += magModel.eval(points) * M + M**2 * magUC
            return lp

        # two burn-in stages (the second re-estimates the proposal
        # covariance from the trace), then the production run
        costs = [loglikelihood]
        logps, trace, dets = sample(pars,
                                    costs, [],
                                    nburn / 2,
                                    cov=cov,
                                    jump=[0., 0.])
        #        cov = numpy.cov(trace.T)
        logps, trace, dets = sample(pars,
                                    costs, [],
                                    nburn / 2,
                                    cov=cov,
                                    jump=[0., 0.])
        cov = numpy.cov(trace.T)
        logps, trace, dets = sample(pars,
                                    costs, [],
                                    niter,
                                    cov=cov,
                                    jump=[0., 0.])
        self.proposal_cov = cov
        self.trace = trace
        self.logp = logps
        cnt = 0
        o = {'logp': logps}
        for key in self.names:
            o[key] = trace[:, cnt].copy()
            cnt += 1
        return o
Exemple #27
0
    print('calculating grid of enclosed projected masses...')
    M2d_grid = np.zeros((rgrid_n, ngrid_n))

    for i in range(rgrid_n):
        for j in range(ngrid_n):
            M2d_grid[i, j] = M2d(rr[i], nn[j], 1.)

    grid_file = h5py.File(grid2dfilename, 'w')
    grid_file.create_dataset('grid', data=M2d_grid)
    grid_file.close()
else:
    grid_file = h5py.File(grid2dfilename, 'r')
    M2d_grid = grid_file['grid'][()].copy()
    grid_file.close()

M2d_interp = ndinterp.ndInterp(axes, M2d_grid, order=3)

def fast_M2d(x, nser):

    xarr = np.atleast_1d(x)
    narr = np.atleast_1d(nser)

    xlen = len(xarr)
    nlen = len(narr)

    if xlen == nlen:
        point = np.array((xarr, narr)).reshape((2, xlen)).T
    elif nlen == 1:
        point = np.array((xarr, nser*np.ones(xlen))).reshape((2, xlen)).T
    elif xlen == 1:
        xarr = x*np.ones(nlen)