Example #1
def mc_goodness_fit(par, drop, niter, zlo=-1.):
    """
    Determine the goodness of fit given the parameters.
    Draw the same number of points from the model as observed, then calculate
    the log-likelihood of the drawn points. Report the probability that a
    simulated observation has a lower log-likelihood than the observed points.
    """
    if drop == 'b':
        cat1 = 'bdrops_gf_v2.cat'
        cat2 = 'bdrops_udf_gf_v2.cat'
        kgrid1 = mlutil.readkgrid('kernel_I.p')
        kgrid2 = mlutil.readkgrid('kernel_I_udf.p')
        zdf1 = 'zdgrid_bdrops.p'
        zdf2 = 'zdgrid_bdrops_udf.p'
        mcfile = 'M1500_to_i.txt'
        mc = bl.mconvert(mcfile)
        mk = mc(4.0)
        chisqnulim = [0.4, 5.0]
    elif drop == 'v':
        cat1 = 'vdrops_gf_v2.cat'
        cat2 = 'vdrops_udf_gf_v2.cat'
        kgrid1 = mlutil.readkgrid('kernel_Z.p')
        kgrid2 = mlutil.readkgrid('kernel_Z_udf.p')
        zdf1 = 'zdgrid_vdrops.p'
        zdf2 = 'zdgrid_vdrops_udf.p'
        mcfile = 'M1500_to_z.txt'
        mc = bl.mconvert(mcfile)
        mk = mc(5.0)
        chisqnulim = [0.5, 5.0]
    cullflags = [0, 1, 2, 3, 4, 12, 13, 14, 18]
    limits1 = array([[21.0, 26.5], [-2.0, 3.0]])
    limits2 = array([[23.0, 28.5], [-2.0, 3.0]])
    #limits1 = bl.limits1
    #limits2 = bl.limits2
    pixdx = array([0.02, 0.02])
    mag1, re1, crit1 = fl.cleandata(cat1,
                                    chisqnulim=chisqnulim[0],
                                    magautolim=26.5,
                                    cullflags=cullflags,
                                    limits=limits1,
                                    zlo=zlo,
                                    drop=drop)
    mag2, re2, crit2 = fl.cleandata(cat2,
                                    chisqnulim=chisqnulim[1],
                                    magautolim=28.5,
                                    cullflags=cullflags,
                                    limits=limits2,
                                    zlo=zlo,
                                    drop=drop)
    data1 = array([mag1, log10(re1)])
    data2 = array([mag2, log10(re2)])
    N1 = len(mag1)
    N2 = len(mag2)
    print(N1, N2, N1 + N2)
    model1 = bl.bivariate_lf(par, limits1, pixdx, drop, 'goods', kgrid=kgrid1,
                             zdgridfile=zdf1, mcfile=mcfile, meankcorr=mk,
                             add_interloper=True, norm=-1.)
    model2 = bl.bivariate_lf(par, limits2, pixdx, drop, 'udf', kgrid=kgrid2,
                             zdgridfile=zdf2, mcfile=mcfile, meankcorr=mk,
                             add_interloper=True, norm=-1.)
    # Normalize phistar so the model integral matches the total observed count.
    sum1 = sum(model1.model.ravel()) * pixdx[0] * pixdx[1]
    sum2 = sum(model2.model.ravel()) * pixdx[0] * pixdx[1]
    phistar_mod = float(N1 + N2) / (sum1 + sum2)
    print(phistar_mod)
    model1.model = phistar_mod * model1.model
    model2.model = phistar_mod * model2.model
    logl_ref = bf.loglikelihood(data1, model1, floor=0.) + bf.loglikelihood(
        data2, model2, floor=0.)
    #logl_ref = bf.mlfunc(par, data1, data2, limits1, limits2, pixdx, kgrid1, kgrid2,
    #   1.0, 1.0, -21.0, zdf1, zdf2, mcfile, 1, mk, 0, -1., 'phistar', drop)
    print "logl_ref", logl_ref
    simlogl_arr = zeros(niter)  # actually -1*logL...
    print "Start drawing simulated observations..."
    t1 = time.time()
    for i in range(niter):
        if i % 1000 == 0: print(i)
        simdata1 = mlutil.draw_from_pdf(N1, model1, model1.limits)
        simdata2 = mlutil.draw_from_pdf(N2, model2, model2.limits)
        simlogl1 = bf.loglikelihood(simdata1, model1)
        simlogl2 = bf.loglikelihood(simdata2, model2)
        simlogl = simlogl1 + simlogl2
        simlogl_arr[i] = simlogl
    t2 = time.time()
    dt = t2 - t1
    dtmin = int(floor(dt)) // 60  # integer division: whole minutes elapsed
    dtsec = dt % 60
    n_worse = sum(simlogl_arr > logl_ref)  # higher -logL means a worse fit than the data
    print("%d iterations took %d min %.1f sec" % (niter, dtmin, dtsec))
    print("Percentage of simulated observations with lower log-likelihood: %.2f %%" %
          (100. * float(n_worse) / float(niter)))
    return logl_ref, simlogl_arr
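For context, the same Monte Carlo goodness-of-fit recipe can be demonstrated on a toy 1-D Gaussian model. The sketch below is illustrative only and assumes none of the pipeline modules (bl, bf, mlutil); note it works with +logL directly, so a worse fit is a *lower* value, whereas simlogl_arr above stores -1 * logL.

import numpy as np

def mc_goodness_of_fit_1d(data, mu, sigma, niter=2000, seed=None):
    # Log-likelihood of the whole dataset under a fixed Gaussian model.
    def loglike(x):
        return np.sum(-0.5 * ((x - mu) / sigma) ** 2
                      - np.log(sigma * np.sqrt(2. * np.pi)))
    rng = np.random.default_rng(seed)
    logl_ref = loglike(data)
    # Draw len(data) points from the model niter times and score each draw.
    sim_logl = np.array([loglike(rng.normal(mu, sigma, size=len(data)))
                         for _ in range(niter)])
    # Fraction of simulated observations that fit the model worse than the data.
    return logl_ref, np.mean(sim_logl < logl_ref)

data = np.random.default_rng(42).normal(0., 1., size=200)
logl_ref, frac_worse = mc_goodness_of_fit_1d(data, mu=0., sigma=1., seed=1)
print("fraction of simulations with lower logL than the data: %.2f" % frac_worse)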
Example #2
def reduced_chi2(par,
                 drop,
                 mbins1,
                 rbins1,
                 mbins2,
                 rbins2,
                 chisqnulim=(0.4, 5.0),
                 zlo=-1):
    """
   Calculate the reduced chi2 of the best-fit model
   mbins, rbins should include the upper limits.
   """
    if drop == 'b':
        cat1 = 'bdrops_gf_v2.cat'
        cat2 = 'bdrops_udf_gf_v2.cat'
        kgrid1 = mlutil.readkgrid('kernel_I.p')
        kgrid2 = mlutil.readkgrid('kernel_I_udf.p')
        zdf1 = 'zdgrid_bdrops.p'
        zdf2 = 'zdgrid_bdrops_udf.p'
        mcfile = 'M1500_to_i.txt'
        mc = bl.mconvert(mcfile)
        mk = mc(4.0)
    elif drop == 'v':
        cat1 = 'vdrops_gf_v2.cat'
        cat2 = 'vdrops_udf_gf_v2.cat'
        kgrid1 = mlutil.readkgrid('kernel_Z.p')
        kgrid2 = mlutil.readkgrid('kernel_Z_udf.p')
        zdf1 = 'zdgrid_vdrops.p'
        zdf2 = 'zdgrid_vdrops_udf.p'
        mcfile = 'M1500_to_z.txt'
        mc = bl.mconvert(mcfile)
        mk = mc(5.0)
    cullflags = [0, 1, 2, 3, 4, 12, 13, 14, 18, 19]
    limits1 = array([[21.0, 26.5], [-2.0, 3.0]])
    limits2 = array([[23.0, 28.5], [-2.0, 3.0]])
    pixdx = array([0.02, 0.02])
    modshape1 = (limits1[:, 1] - limits1[:, 0]) / pixdx
    modshape1 = around(modshape1).astype('int')
    modshape2 = (limits2[:, 1] - limits2[:, 0]) / pixdx
    modshape2 = around(modshape2).astype('int')
    # bin the points & tally the counts
    mag1, re1, crit1 = cleandata(cat1,
                                 chisqnulim=chisqnulim[0],
                                 magautolim=26.5,
                                 cullflags=cullflags,
                                 limits=limits1,
                                 zlo=zlo)
    mag2, re2, crit2 = cleandata(cat2,
                                 chisqnulim=chisqnulim[1],
                                 magautolim=28.5,
                                 cullflags=cullflags,
                                 limits=limits2,
                                 zlo=zlo)
    bincounts1 = histogram2d(mag1, log10(re1),
                             bins=[mbins1, rbins1])[0].astype('float')
    bincounts2 = histogram2d(mag2, log10(re2),
                             bins=[mbins2, rbins2])[0].astype('float')
    #print bincounts1, bincounts2

    # calculate the best-fit models
    model1 = bl.bivariate_lf(par, limits1, pixdx, kgrid=kgrid1, zdgridfile=zdf1,
                             mcfile=mcfile, drop=drop, field='goods', meankcorr=mk,
                             add_interloper=True)
    model2 = bl.bivariate_lf(par, limits2, pixdx, kgrid=kgrid2, zdgridfile=zdf2,
                             mcfile=mcfile, drop=drop, field='udf', meankcorr=mk,
                             add_interloper=True)
    phistar_mod = phistar(par, drop, zlo=zlo)
    model1.model = phistar_mod * model1.model
    model2.model = phistar_mod * model2.model
    #model1.model = ones(modshape1)/(modshape1[0]*modshape1[1]*pixdx[0]*pixdx[1])*len(mag1)
    #model2.model = ones(modshape2)/(modshape2[0]*modshape2[1]*pixdx[0]*pixdx[1])*len(mag2)
    print(sum(model1.model.ravel()) * pixdx[0] * pixdx[1])  # sanity check on the model integral

    chi2tot = 0.
    nbins = 0

    # Convert bin edges into pixel indices of the model arrays.
    mindex1 = (mbins1 - 21.0) / 0.02
    mindex1 = around(mindex1).astype('int')
    rindex1 = (rbins1 - (-2.0)) / 0.02
    rindex1 = around(rindex1).astype('int')
    mindex2 = (mbins2 - 23.0) / 0.02
    mindex2 = around(mindex2).astype('int')
    rindex2 = (rbins2 - (-2.0)) / 0.02
    rindex2 = around(rindex2).astype('int')

    num_exp1 = []  # number of expected
    num_exp2 = []
    num_obs1 = bincounts1.ravel()[bincounts1.ravel() >= 5]
    num_obs2 = bincounts2.ravel()[bincounts2.ravel() >= 5]
    # iterate through all bins and calculate the chi2
    for i in range(len(mbins1) - 1):
        for j in range(len(rbins1) - 1):
            if bincounts1[i, j] >= 5:
                num_mod = sum(model1.model[mindex1[i]:mindex1[i + 1],
                                           rindex1[j]:rindex1[j + 1]].ravel())
                num_mod = num_mod * pixdx[0] * pixdx[1]
                num_exp1 += [num_mod]
                #chi2 = (bincounts1[i,j] - num_mod)**2 / num_mod
                #print bincounts1[i,j], num_mod
                #chi2tot += chi2
                #nbins += 1
                #if bincounts1[i,j] < nmin: nmin = bincounts1[i,j]
    for i in range(len(mbins2) - 1):
        for j in range(len(rbins2) - 1):
            if bincounts2[i, j] >= 5:
                num_mod = sum(model2.model[mindex2[i]:mindex2[i + 1],
                                           rindex2[j]:rindex2[j + 1]].ravel())
                num_mod = num_mod * pixdx[0] * pixdx[1]
                num_exp2 += [num_mod]
                #chi2 = (bincounts2[i,j] - num_mod)**2 / num_mod
                #chi2tot += chi2
                #nbins += 1
    print "nbins", nbins

    # Run chi-square test
    num_exp = concatenate([num_exp1, num_exp2])
    num_obs = concatenate([num_obs1, num_obs2])
    ndeg = len(num_exp) - 1  # degrees of freedom = number of contributing bins - 1
    print("ndeg", ndeg)
    chi2, pval = stats.mstats.chisquare(num_obs, f_exp=num_exp)
    print(chi2, pval)
    #print num_exp, num_obs
    #chi2nu = chi2tot / float(ndeg)
    return chi2, pval, num_exp, num_obs
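As a standalone reference for the binned test above, here is a minimal sketch with toy data, using plain scipy.stats.chisquare rather than the masked-array stats.mstats.chisquare: build observed and expected counts, apply the usual >= 5 counts-per-bin cut, and run the test.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = rng.normal(0., 1., size=500)
bins = np.linspace(-4., 4., 17)

# Observed counts per bin; expected counts from the model CDF, scaled to N.
obs = np.histogram(data, bins=bins)[0]
exp = len(data) * np.diff(stats.norm.cdf(bins, loc=0., scale=1.))

# Keep only well-populated bins (the usual >= 5 counts rule of thumb).
mask = obs >= 5
num_obs, num_exp = obs[mask].astype(float), exp[mask]
num_exp *= num_obs.sum() / num_exp.sum()  # chisquare requires matching totals
chi2, pval = stats.chisquare(num_obs, f_exp=num_exp)
ndeg = mask.sum() - 1  # degrees of freedom = number of contributing bins - 1
print("chi2 = %.2f, ndeg = %d, p-value = %.3f" % (chi2, ndeg, pval))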
Example #3
from numpy import *
import bivariate_lf as bl
import bivariate_fit as bf
import fit_lbg as fl
import mlutil
import zdist
import os, sys, time
from multiprocessing import Queue, Process, Pool

## Initialize the kernels, redshift grids, and (logr0, sigma) parameter grid shared by the worker processes
parv = array([-1.68527018, -20.50967054, 0.8757289, 0.85187255, 0.26594964])
dlimits1 = array([[24.0, 25.0], [-2.0, 3.0]])
dlimits2 = dlimits1.copy()
mlimits1 = array([[21.0, 26.5], [-2.0, 3.0]])
mlimits2 = array([[23.0, 28.5], [-2.0, 3.0]])
kgrid1 = mlutil.readkgrid('kernel_Z.p')
kgrid2 = mlutil.readkgrid('kernel_Z_udf.p')
zdgrid1 = zdist.read_zdgrid('zdgrid_vdrops.p')
zdgrid2 = zdist.read_zdgrid('zdgrid_vdrops_udf.p')
mc = bl.mconvert('M1500_to_z.txt')
logr0_arr = arange(0.7, 1.0, 0.005)
sigma_arr = arange(1.0, 1.3, 0.005)
logr0_grid, sigma_grid = meshgrid(logr0_arr, sigma_arr)
logr0_grid, sigma_grid = map(ravel, [logr0_grid, sigma_grid])
#print logr0_grid
nprocs = 3
#chunksize = float(niter) / nproc
q_logl = Queue()
q_pars = Queue()
par = parv.copy()
N = len(logr0_arr) * len(sigma_arr)
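The worker code is not part of this snippet; one common pattern for evaluating such a (logr0, sigma) grid in parallel is multiprocessing.Pool, sketched below with a stand-in objective (eval_pair is hypothetical, not a function from these modules).

import numpy as np
from multiprocessing import Pool

def eval_pair(pars):
    # Stand-in objective: substitute the real likelihood evaluation here.
    logr0, sigma = pars
    return -((logr0 - 0.85) ** 2 + (sigma - 1.15) ** 2)

if __name__ == '__main__':
    logr0_arr = np.arange(0.7, 1.0, 0.005)
    sigma_arr = np.arange(1.0, 1.3, 0.005)
    logr0_grid, sigma_grid = map(np.ravel, np.meshgrid(logr0_arr, sigma_arr))
    with Pool(processes=3) as pool:
        logl = pool.map(eval_pair, list(zip(logr0_grid, sigma_grid)))
    best = int(np.argmax(logl))
    print("best (logr0, sigma): (%.3f, %.3f)" % (logr0_grid[best], sigma_grid[best]))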
Example #4
#!/usr/bin/env python

from numpy import *
from pygoods import *
import bivariate_lf as bl
import bivariate_fit as bf
import fit_lbg as fl
import mlutil

parb = array([-1.61711035, -20.53430348, 0.81505052, 0.76553555, 0.20435599])
parv = array([-1.68527018, -20.50967054, 0.8757289, 0.85187255, 0.26594964])
kgrid1 = mlutil.readkgrid('kernel_I.p')
kgrid2 = mlutil.readkgrid('kernel_I_udf.p')
kgrid3 = mlutil.readkgrid('kernel_Z.p')
kgrid4 = mlutil.readkgrid('kernel_Z_udf.p')
mci = bl.mconvert('M1500_to_i.txt')
mcz = bl.mconvert('M1500_to_z.txt')


def logl_sigma_bdrops(sarr=arange(0.3, 1.0, 0.05)):
    mag1, re1, crit1 = fl.cleandata('bdrops_gf_v2.cat',
                                    chisqnulim=0.4,
                                    magautolim=26.5,
                                    limits=bl.limits1,
                                    drop='b')
    mag2, re2, crit2 = fl.cleandata('bdrops_udf_gf_v2.cat',
                                    chisqnulim=5.0,
                                    magautolim=28.5,
                                    limits=bl.limits2,
                                    drop='b')
    data1 = array([mag1, log10(re1)])
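This listing is cut off after the data arrays are assembled; judging from the name and the sarr argument, the rest presumably scans sigma and records the log-likelihood at each value. A generic sketch of that kind of 1-D profile scan, with a stand-in loglike callable instead of the module's fitting machinery, could look like:

import numpy as np

def profile_scan(data, sarr, loglike):
    # Evaluate a log-likelihood callable on a 1-D grid of sigma values.
    logl = np.array([loglike(data, s) for s in sarr])
    return sarr[np.argmax(logl)], logl

# Toy usage: Gaussian data, scan the width parameter.
rng = np.random.default_rng(3)
data = rng.normal(0., 0.6, size=300)
gauss_logl = lambda x, s: np.sum(-0.5 * (x / s) ** 2 - np.log(s))
best_sigma, logl = profile_scan(data, np.arange(0.3, 1.0, 0.05), gauss_logl)
print("sigma at maximum logL: %.2f" % best_sigma)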
Example #5
def plot_sizedist(parb, parv):
    mod_lograrr = arange(bl.limits1[1][0], bl.limits1[1][1], 0.02)
    magb1, reb1, critb1 = fl.cleandata('bdrops_gf_v2.cat',
                                       drop='b',
                                       limits=bl.limits1,
                                       zlo=3.0)
    magb2, reb2, critb2 = fl.cleandata('bdrops_udf_gf_v2.cat',
                                       chisqnulim=5.0,
                                       magautolim=28.5,
                                       limits=bl.limits2,
                                       drop='b',
                                       zlo=3.0)
    kgridb1 = mlutil.readkgrid('kernel_I.p')
    kgridb2 = mlutil.readkgrid('kernel_I_udf.p')
    mci = bl.mconvert('M1500_to_i.txt')
    reb = concatenate((reb1, reb2))
    modelb1 = bl.bivariate_lf(parb,
                              bl.limits1,
                              bl.pixdx,
                              'b',
                              'goods',
                              kgrid=kgridb1,
                              zdgridfile='zdgrid_bdrops.p',
                              mcfile='M1500_to_i.txt',
                              meankcorr=mci(4.0))
    modelb2 = bl.bivariate_lf(parb,
                              bl.limits2,
                              bl.pixdx,
                              'b',
                              'udf',
                              kgrid=kgridb2,
                              zdgridfile='zdgrid_bdrops_udf.p',
                              mcfile='M1500_to_i.txt',
                              meankcorr=mci(4.0))
    sizedist_b = (modelb1.model.sum(axis=0) +
                  modelb2.model.sum(axis=0)) / 10.**mod_lograrr
    magv1, rev1, critv1 = fl.cleandata('vdrops_gf_v2.cat',
                                       chisqnulim=0.5,
                                       drop='v',
                                       limits=bl.limits1,
                                       zlo=4.0)
    magv2, rev2, critv2 = fl.cleandata('vdrops_udf_gf_v2.cat',
                                       chisqnulim=5.0,
                                       magautolim=28.5,
                                       limits=bl.limits2,
                                       drop='v',
                                       zlo=4.0)
    rev = concatenate((rev1, rev2))
    kgridv1 = mlutil.readkgrid('kernel_Z.p')
    kgridv2 = mlutil.readkgrid('kernel_Z_udf.p')
    mcz = bl.mconvert('M1500_to_z.txt')
    modelv1 = bl.bivariate_lf(parv,
                              bl.limits1,
                              bl.pixdx,
                              'v',
                              'goods',
                              kgrid=kgridv1,
                              zdgridfile='zdgrid_vdrops.p',
                              mcfile='M1500_to_z.txt',
                              meankcorr=mcz(5.0))
    modelv2 = bl.bivariate_lf(parv,
                              bl.limits2,
                              bl.pixdx,
                              'v',
                              'udf',
                              kgrid=kgridv2,
                              zdgridfile='zdgrid_vdrops_udf.p',
                              mcfile='M1500_to_z.txt',
                              meankcorr=mcz(5.0))
    sizedist_v = (modelv1.model.sum(axis=0) +
                  modelv2.model.sum(axis=0)) / 10.**mod_lograrr
    # fit the uncorrected size distribution with lognormal function
    xout_b = fln.fit_lognormal(drop='b')
    print(xout_b)
    xout_v = fln.fit_lognormal(drop='v')
    print(xout_v)

    # plot
    fig = plt.figure(figsize=(8, 10))
    ax1 = fig.add_subplot(211)
    rarr = arange(0.001, 41., 1.)
    pl_rarr = arange(0.001, 41., 0.01)

    h1 = ax1.hist(reb, rarr, color='gray', ec='none')
    fb = fln.lognormal(xout_b[0], xout_b[1], pl_rarr)
    ax1.plot(pl_rarr,
             fb * max(h1[0]) / max(fb),
             color='black',
             label=r'$\sigma=%.2f$' % xout_b[1])
    ax1.plot(10.**mod_lograrr,
             sizedist_b * max(h1[0]) / max(sizedist_b),
             color='red',
             label=r'$\sigma=%.2f$; corrected' % parb[3])

    ax2 = fig.add_subplot(212)
    h2 = ax2.hist(rev, rarr, color='gray', ec='none')
    fv = fln.lognormal(xout_v[0], xout_v[1], pl_rarr)
    ax2.plot(pl_rarr,
             fv * max(h2[0]) / max(fv),
             color='black',
             label=r'$\sigma=%.2f$' % xout_v[1])
    ax2.plot(10.**mod_lograrr,
             sizedist_v * max(h2[0]) / max(sizedist_v),
             color='red',
             label=r'$\sigma=%.2f$; corrected' % parv[3])

    ax1.set_xlim(0, 40)
    ax2.set_xlim(0, 40)
    ax1.set_xlabel('Re [pixels] (0.03"/pixel)')
    ax2.set_xlabel('Re [pixels] (0.03"/pixel)')
    ax1.legend(loc=1)
    ax2.legend(loc=1)
    ax1.set_title('B-dropouts (z~4)')
    ax2.set_title('V-dropouts (z~5)')

    return fig
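fln.fit_lognormal is not shown in this listing. If one only needs the uncorrected lognormal fit to the observed sizes, scipy.stats.lognorm offers an equivalent; the sketch below uses toy radii, and scipy's shape parameter s is the sigma of ln(Re).

import numpy as np
from scipy import stats

rng = np.random.default_rng(7)
re = rng.lognormal(mean=np.log(5.), sigma=0.8, size=400)  # toy half-light radii [pixels]

# Fit with the location fixed at zero, since sizes are strictly positive.
shape, loc, scale = stats.lognorm.fit(re, floc=0.)
print("sigma of ln(Re): %.2f   median Re: %.2f pixels" % (shape, scale))

# Evaluate the fitted pdf on a grid, e.g. for overplotting on a histogram.
rarr = np.linspace(0.01, 40., 500)
pdf = stats.lognorm.pdf(rarr, shape, loc=0., scale=scale)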
Example #6
def show_model(par,
               drop,
               field,
               newfig=True,
               axCent=None,
               fig1=None,
               lfbw=0.2,
               sdbw=0.2,
               colors=['blue', 'green', 'black', 'red']):
    if drop == 'b':
        zmean = 4.0
        mc = bl.mconvert('M1500_to_i.txt')
        if field == 'goods':
            cat = 'bdrops_gf_v3.cat'
            zdgrid = zdist.read_zdgrid('zdgrid/zdgrid_bdrops_nolya.p')
            limits = limitsb1
            magautolim = 26.5
            chisqnulim = 0.4
            reerrlim = 0.6
            kgridfile = 'tfkernel/kernel_I.p'
            dataset = 'GOODS'
        elif field == 'udf':
            cat = 'bdrops_udf_gf_v3.cat'
            zdgrid = zdist.read_zdgrid('zdgrid/zdgrid_bdrops_udf_nolya.p')
            limits = limitsb2
            magautolim = 28.5
            chisqnulim = 5.0
            reerrlim = 0.6
            kgridfile = 'tfkernel/kernel_I_udf.p'
            dataset = 'HUDF'
    elif drop == 'v':
        zmean = 5.0
        mc = bl.mconvert('M1500_to_z.txt')
        if field == 'goods':
            cat = 'vdrops_gf_v2.cat'
            zdgrid = zdist.read_zdgrid('zdgrid/zdgrid_vdrops_nolya_bston5.p')
            limits = limitsv1
            magautolim = 26.5
            chisqnulim = 0.5
            reerrlim = 0.6
            kgridfile = 'tfkernel/kernel_Z.p'
            dataset = 'GOODS'
        elif field == 'udf':
            cat = 'vdrops_udf_gf_v2.cat'
            zdgrid = zdist.read_zdgrid(
                'zdgrid/zdgrid_vdrops_udf_nolya_bston5.p')
            limits = limitsv2
            magautolim = 28.5
            chisqnulim = 5.0
            reerrlim = 0.6
            kgridfile = 'tfkernel/kernel_Z_udf.p'
            dataset = 'HUDF'

    kgrid = mlutil.readkgrid(kgridfile)

    meankcorr = mc(zmean)
    fp = matplotlib.font_manager.FontProperties(size=9)
    if newfig:
        fig1 = plt.figure(figsize=(10, 15))
        axCent = plt.subplot(111)
    divider = make_axes_locatable(axCent)

    restlim = array([[-26.5, -15.5], [-0.5, 2.0]])
    pixdx = array([0.02, 0.02])

    if drop == 'b': z0 = 3.0
    else: z0 = 4.0
    zd_flat = zdist.zdgrid(-25.0, -15.0, 0.5, -0.6, 1.8, 0.2, z0, 6.0, 0.1,
                           drop, zdgrid.area)
    zd_flat.flat_zdgrid(zlo=zmean - 0.5, zhi=zmean + 0.5)
    with open('zdgrid_flat.p', 'wb') as f:  # binary mode is required for pickling
        pickle.dump(zd_flat, f, 2)

    model0 = bl.bivariate_lf(par, limits, pixdx, drop, field, mc=mc,
                             meankcorr=meankcorr, zdgrid=None)
    V0 = zdgrid.dVdz[(zdgrid.zarr >= (zmean - 0.5))
                     & (zdgrid.zarr <= (zmean + 0.5))]
    model0.model = model0.model * sum(V0)

    model_ds = bl.bivariate_lf(par, limits, pixdx, drop, field, kgrid=None,
                               meankcorr=meankcorr, zdgrid=zdgrid, mc=mc, norm=-1)
    model = bl.bivariate_lf(par,
                            limits,
                            pixdx,
                            drop,
                            field,
                            kgrid=kgrid,
                            meankcorr=meankcorr,
                            M0=-21.0,
                            zdgrid=zdgrid,
                            mc=mc,
                            norm=-1)
    # Show model + data points
    #axCent.imshow(model.model.swapaxes(0,1), origin = 'lower', vmin = vmin, vmax = vmax,
    #   aspect = 'auto')
    mag, re, crit = fl.cleandata(cat,
                                 chisqnulim=chisqnulim,
                                 reerr_ratiolim=reerrlim,
                                 limits=limits,
                                 drop=drop)
    npts = len(mag)
    print(npts)
    axCent.scatter(mag, log10(re), s=4, color='black')
    axCent.contour(arange(limits[0][0], limits[0][1], pixdx[0]),
                   arange(limits[1][0], limits[1][1], pixdx[1]),
                   model.model.swapaxes(0, 1), 6, colors=colors[0])

    yf = 0.5
    axCent.set_yticks(arange(limits[1][0], limits[1][1], 0.5))
    axCent.set_yticklabels(arange(limits[1][0], limits[1][1], 0.5))
    if drop == 'b':
        axCent.set_xlabel(r'GALFIT MAG in $i_{775}$')
        axCent.set_ylabel(r'GALFIT $\log_{10}(R_e)$ in $i_{775}$ [pixel]')
    elif drop == 'v':
        axCent.set_xlabel(r'GALFIT MAG in $z_{850}$')
        axCent.set_ylabel(r'GALFIT $\log_{10}(R_e)$ in $z_{850}$ [pixel]')
    #plt.suptitle(r'$\vec{\mathbf{P}}=[%.2f, %.2f, %.2f$",$ %.2f, %.2f]$' % (par[0],par[1],
    #   10.**par[2]*0.03,par[3],par[4]),size=20)
    #axCent.text(0.1,0.1,"par = [%.2f,%.2f,%.2f,%.2f,%.2f]"%tuple(par),transform=axCent.transAxes,
    #   color='black')
    # plot a straight line through the observed points
    ydata = findline(mag, log10(re))
    xr = arange(limits[0][0], limits[0][1], 0.02)
    axCent.plot(xr, ydata[0] * xr + ydata[1], ':', c='black')
    # plot the straight line corresponding to the power-law relation with logR0 and beta
    m0 = -21.0 + mc(zmean)
    b = par[2] + 0.4 * par[4] * m0
    axCent.plot(xr, -0.4 * par[4] * xr + b, '--', lw=2.5, c=colors[3])

    axCent.text(0.1,
                0.9,
                dataset,
                transform=axCent.transAxes,
                color='black',
                size=14)

    # Show LF
    axLF = divider.append_axes("top", size=1.2, pad=0.0, sharex=axCent)
    n, bins = histogram(mag, arange(limits[0, 0], limits[0, 1] + lfbw, lfbw))
    nerr = [sqrt(n), sqrt(n)]
    for i in range(len(n)):
        if n[i] == 1: nerr[0][i] = 1. - 1.e-3  # keep the lower error bar positive on the log axis
    axLF.errorbar(bins[:-1] + lfbw / 2., n, yerr=nerr, fmt='.', ms=14.,
                  mfc='black', ls='None', mec='black', ecolor='black', capsize=6)
    LF = model.model.sum(axis=1)  # LF here contains the volume already
    LFtot = sum(LF) * pixdx[0] / lfbw
    normfactor = npts / LFtot  # normalize the LF to predict the total number of points
    LF = LF * normfactor
    LF0 = model0.model.sum(axis=1)
    LF0 = LF0 * normfactor
    LF_ds = model_ds.model.sum(axis=1)
    LF_ds = LF_ds * normfactor
    axLF.semilogy(arange(limits[0, 0], limits[0, 1], pixdx[0]), LF,
                  color=colors[2], nonposy='mask', label='GALFIT TF')
    axLF.semilogy(arange(limits[0, 0], limits[0, 1], pixdx[0]), LF_ds,
                  color=colors[1], ls=':', lw=2, nonposy='mask',
                  label='w/ dropout sel. kernel')
    axLF.semilogy(arange(limits[0, 0], limits[0, 1], pixdx[0]), LF0,
                  color=colors[0], ls='--', lw=2, nonposy='mask',
                  label='Schechter')
    axLF.set_yticks([1.e-2, 1., 1.e2])
    axLF.set_ylim(1.e-2, max(n) * 50.)
    xf = 1.0
    axCent.set_xticks(arange(limits[0][0], limits[0][1] + 1., 1.))
    axCent.set_xticklabels(arange(limits[0][0], limits[0][1] + 1., 1.))
    axCent.set_xlim(limits[0][0], limits[0][1])
    #axLF.legend(loc='upper left', prop=fp)

    # Show size distribution
    axSD = divider.append_axes("right", size="35%", pad=0.0, sharey=axCent)
    n, bins = histogram(log10(re),
                        arange(limits[1, 0], limits[1, 1] + sdbw, sdbw))
    nerr = [sqrt(n), sqrt(n)]
    for i in range(len(n)):
        if n[i] == 1: nerr[0][i] = 1. - 1.e-3  # keep the lower error bar positive on the log axis
    axSD.errorbar(n, bins[:-1] + sdbw / 2., xerr=nerr, fmt='.', ms=14,
                  mfc='black', ls='None', mec='black', ecolor='black', capsize=6)
    SD = model.model.sum(axis=0)
    SDtot = sum(SD) * pixdx[1] / sdbw
    normfactor = npts / SDtot
    SD = SD * normfactor
    sizer = arange(limits[1, 0], limits[1, 1], pixdx[1])
    axSD.semilogx(SD, sizer, color=colors[2], label='GALFIT TF')
    SD0 = model0.model.sum(axis=0)
    SD0 = SD0 * normfactor
    SD_ds = model_ds.model.sum(axis=0)
    SD_ds = SD_ds * normfactor
    axSD.semilogx(SD_ds,
                  sizer,
                  color=colors[1],
                  ls=':',
                  lw=2,
                  label='w/ dropout sel. kernel')
    axSD.semilogx(SD0,
                  sizer,
                  color=colors[0],
                  ls='--',
                  lw=2,
                  label='lognormal')
    axSD.set_xticks([1., 10., 1.e2])
    axSD.set_xlim(1.e-2, max(SD) * 5)
    #axSD.legend(loc='lower right', prop=fp)
    #axCent.set_ylim(limits[1][0], limits[1][1])
    axCent.set_ylim(-0.6, 1.8)
    plt.draw()
    fig1.show()

    for tl in axLF.get_xticklabels():
        tl.set_visible(False)
    for tl in axSD.get_yticklabels():
        tl.set_visible(False)
    return model, axCent, axSD, axLF, fig1
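The panel layout in show_model is worth isolating: the LF and size-distribution axes are carved off the central axes with mpl_toolkits.axes_grid1.make_axes_locatable so that they share the magnitude and size scales. A self-contained sketch of just that layout, using random stand-in data:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

rng = np.random.default_rng(11)
mag = rng.normal(25.5, 1.0, size=300)     # stand-in magnitudes
logre = rng.normal(0.6, 0.3, size=300)    # stand-in log10(Re)

fig = plt.figure(figsize=(8, 8))
axCent = plt.subplot(111)
axCent.scatter(mag, logre, s=4, color='black')
axCent.set_xlabel('MAG')
axCent.set_ylabel(r'$\log_{10}(R_e)$ [pixel]')

# Carve the marginal panels off the central axes; they share its scales.
divider = make_axes_locatable(axCent)
axLF = divider.append_axes("top", size=1.2, pad=0.0, sharex=axCent)
axSD = divider.append_axes("right", size="35%", pad=0.0, sharey=axCent)
axLF.hist(mag, bins=20, color='gray')
axSD.hist(logre, bins=20, color='gray', orientation='horizontal')

# Hide the tick labels duplicated from the shared axes.
for tl in axLF.get_xticklabels():
    tl.set_visible(False)
for tl in axSD.get_yticklabels():
    tl.set_visible(False)
plt.show()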