# Example 1
def getRs(clusterz, mass, errs):
    """Return the NFW scale radius for a mass estimate and its error bars.

    Converts a mass (with asymmetric errors) measured inside 1.5 Mpc at
    fixed concentration c=4 into the corresponding scale radius.

    Parameters:
        clusterz -- cluster redshift
        mass     -- central mass estimate
        errs     -- (lower, upper) mass error pair

    Returns:
        (rs, np.array([lower_rs_err, upper_rs_err]))
    """
    rs_mid = nfwutils.RsMassInsideR(mass, 4.0, clusterz, 1.5)
    rs_lo = nfwutils.RsMassInsideR(mass - errs[0], 4.0, clusterz, 1.5)
    rs_hi = nfwutils.RsMassInsideR(mass + errs[1], 4.0, clusterz, 1.5)

    # Propagate the asymmetric mass errors into scale-radius errors.
    rs_errs = np.array([rs_mid - rs_lo, rs_hi - rs_mid])
    return rs_mid, rs_errs
# Example 2
    def nloglike(Mguess):
        """Negative log-likelihood of a mass guess (in units of 1e14)."""
        # Convert from units of 1e14 into an absolute mass.
        mass = Mguess * 1e14

        # Scale radius implied by the guessed mass at the fixed concentration.
        rs_guess = nfwutils.RsMassInsideR(mass, c, zcluster, massrad)

        loglike = pdzperturbtools.nloglike_loop(r_points, z_points,
                                                ghat_points, betas, pdz,
                                                gsig, rs_guess, c, zcluster)
        return -loglike
# Example 3
        def r_scale(mass=parts.mass_15mpc,
                    concentration=parts.concentration,
                    zcluster=parts.zcluster):
            """NFW scale radius for the mass inside 1.5 Mpc.

            Maps non-physical masses (ValueError) onto a zero-probability
            sample so the MCMC sampler rejects them.
            """
            try:
                return nfwutils.RsMassInsideR(mass, concentration,
                                              zcluster, 1.5)
            except ValueError:
                # Invalid mass for this geometry: reject the sample.
                raise pymc.ZeroProbability
# Example 4
def createCutoutSuite(zs,
                      massrange,
                      goodbpz,
                      sizes,
                      snratios,
                      outputdir,
                      simcats=None,
                      sourcecat=None,
                      shape_distro=__DEFAULT_SHAPE_DISTRO__,
                      shape_distro_kw_sets=100 * [{
                          'sigma': 0.25
                      }],
                      idcol='ID'):

    if simcats is None:
        simcats = []
        for i in range(len(shape_distro_kw_sets)):
            simsource = extractField(sourcecat, sizes, snratios)
            simcats.append(simsource)

    for curz in zs:
        print 'z = %2.2f' % curz
        for cur_mass in massrange:
            print '\tmass = %2.2f' % (cur_mass / 1e14)

            for i, simsource, kw_set in zip(range(len(simcats)), simcats,
                                            shape_distro_kw_sets):

                print '\t\t%d' % i

                #adam-old# base = '%s/cutout_z=%1.2f_mass=%2.2f_%d' % (outputdir, curz, cur_mass / 1e14, i)
                base = '%s/cutout_z_drawn_z=%1.2f_mass=%2.2f_%d' % (
                    outputdir, curz, cur_mass / 1e14, i)

                #cur_rs = nfwutils.rscaleConstM(cur_mass, 4.0, curz, 500)
                cur_rs = nfwutils.RsMassInsideR(cur_mass, 4.0, curz, 1.5)

                simsource, simbpz = commonSubset(simsource, goodbpz, id2=idcol)

                simcat, momento = createCatalog(simbpz,
                                                simsource['size'],
                                                simsource['snratio'],
                                                4.0,
                                                cur_rs,
                                                curz,
                                                ngals=None,
                                                shape_distro=shape_distro,
                                                shape_distro_kw=kw_set,
                                                radii_pix=simsource['r_pix'],
                                                idcol=idcol)

                simcat.saveas('%s.cat' % base, clobber=True)
                output = open('%s.momento' % base, 'wb')
                cPickle.dump(momento, output, -1)
                output.close()
# Example 5
def readCCSummary_diffR(dir,
                        clusters,
                        bootrange,
                        diffR,
                        cluster_zs,
                        concentration=4.):

    masses = {}
    mask = {}

    for cluster in clusters:

        workdir = '%s/%s' % (dir, cluster)

        zcluster = cluster_zs[cluster]

        masses[cluster] = np.zeros(len(bootrange))
        mask[cluster] = np.ones(len(bootrange))

        for i, bootnum in enumerate(bootrange):
            inputfile = '%s/bootstrap_%d.cc.out' % (workdir, bootnum)

            if not os.path.exists(inputfile):
                print inputfile
                mask[cluster][i] = 0
                masses[cluster][i] = -1
                continue

            input = open(inputfile)
            for line in input.readlines():
                if cc_regex.match(line):
                    tokens = line.split()
                    mass = float(tokens[-1])
                    if mass == 0.:
                        mass = 1e13

                    rscale = nfwutils.RsMassInsideR(mass, concentration,
                                                    zcluster, 1.5)
                    masses[cluster][i] = nfwutils.massInsideR(
                        rscale, concentration, zcluster, diffR)
                    break
            input.close()

        print cluster, len(masses[cluster])

    return masses, mask
# Example 6
def createNLikelihood(mcluster,
                      zcluster,
                      z0,
                      sigz0,
                      rmin=0.75,
                      rmax=3.0,
                      c=4.0,
                      massrad=1.5,
                      gsig=0.25,
                      npoints=200000):
    """Build a negative log-likelihood function for a simulated NFW cluster.

    Draws `npoints` mock galaxies (radii, redshifts, noisy reduced shears)
    from an NFW model of mass `mcluster` and returns a closure nloglike(Mguess)
    that evaluates the negative log-likelihood of a trial mass against them.

    Parameters:
        mcluster -- true cluster mass (absolute units) inside `massrad`
        zcluster -- cluster redshift
        z0, sigz0 -- mean and sigma of the (Gaussian) source redshift distro
        rmin, rmax -- radial range in Mpc for the mock galaxies
        c -- NFW concentration
        massrad -- radius (Mpc) inside which `mcluster` is defined
        gsig -- per-galaxy shape noise on the reduced shear
        npoints -- number of mock galaxies to draw

    Returns:
        nloglike -- callable taking a mass guess in units of 1e14.
    """

    # Scale radius of the true model.
    rs = nfwutils.RsMassInsideR(mcluster, c, zcluster, massrad)

    # Draw radii as sqrt(x^2 + y^2) with x, y uniform so that the 2-D
    # positions are uniform over a box; divide by sqrt(2) so the diagonal
    # spans [rmin, rmax].
    linmin = rmin / np.sqrt(2)
    linmax = rmax / np.sqrt(2)
    r_points = np.sqrt(
        np.random.uniform(linmin, linmax, size=npoints)**2 +
        np.random.uniform(linmin, linmax, size=npoints)**2)

    # Source redshifts drawn from N(z0, sigz0).
    z_points = z0 + sigz0 * np.random.standard_normal(npoints)

    # Reduced shear g = beta_s * gamma_inf / (1 - beta_s * kappa_inf).
    gamma_inf = nfwmodeltools.NFWShear(r_points, c, rs, zcluster)
    kappa_inf = nfwmodeltools.NFWKappa(r_points, c, rs, zcluster)
    beta_s = nfwutils.beta_s(z_points, zcluster)

    g0 = beta_s * gamma_inf / (1 - beta_s * kappa_inf)

    # Observed shears: true reduced shear plus Gaussian shape noise.
    ghat_points = g0 + gsig * np.random.standard_normal(npoints)

    # Tabulated p(z) over +/- 5 sigma, normalized per dz bin.
    dzt = 0.01
    pdzrange = np.arange(z0 - 5 * sigz0, z0 + 5 * sigz0, dzt)
    pdz = stats.Gaussian(pdzrange, z0, sigz0) * dzt

    betas = np.array(nfwutils.beta_s(pdzrange, zcluster))

    def nloglike(Mguess):
        """Negative log-likelihood of a mass guess (in units of 1e14)."""
        #units of 1e14
        Mguess = Mguess * 1e14

        rs_guess = nfwutils.RsMassInsideR(Mguess, c, zcluster, massrad)

        return -pdzperturbtools.nloglike_loop(r_points, z_points, ghat_points,
                                              betas, pdz, gsig, rs_guess, c,
                                              zcluster)

    return nloglike
# Example 7
def stackClusters(data = None, doPlot = True, cosmology = nfwutils.std_cosmology):
    """Stack the scaled shear signal of all clusters in 'worklist'.

    For each cluster, loads the lensing catalog, looks up its median
    mass posterior, rescales the shear profile to a common redshift
    grid, and accumulates per-bin weights/shears. Then computes the
    stacked median and 1/2-sigma bands and (optionally) plots them.

    Parameters:
        data      -- optional cache dict; intermediate results are reused
                     from / stored into it so repeated calls are cheap
        doPlot    -- when True, produce and return the summary figure
        cosmology -- cosmology passed through to the shear rescaling

    Returns:
        (fig, data) when doPlot is True, otherwise data.
    """

    workdir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08'
    outdir = '/u/ki/dapple/subaru/doug/publication/baseline_2012-02-08'

    if data is None:
        data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']

    if 'zbins' not in data:
        # Finer bins at low scaled-z, coarser toward z=10.
        data['zbins'] = np.unique(np.hstack([np.linspace(0., 1., 3.), np.linspace(1., 5., 10.), np.linspace(5., 10., 5.)]))
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    if 'clusters' not in data:
        clusters = {}
        # NOTE: `filterband` renamed from `filter` to stop shadowing the builtin.
        for cluster, filterband, image in items:
            key = (cluster, filterband, image)
            clusters[key] = {}
            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions()
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                             workdir = workdir, 
                             incatalog = '%s/%s.%s.%s.lensingbase.cat' % (workdir, cluster, filterband, image),
                             cluster = cluster, filter = filterband, image = image,
                             shapecut = True, 
                             redseqcat = '%s/%s.%s.%s.redsequence.cat' % (workdir, cluster, filterband, image))

            controller.load(options, args)

            # BUGFIX: cPickle.load(open(...)) leaked the file handle.
            summaryfile = open('%s/%s.%s.%s.out.mass15mpc.mass.summary.pkl' % (outdir, cluster, filterband, image))
            try:
                stats = cPickle.load(summaryfile)
            finally:
                summaryfile.close()
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)

            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0, cosmology = cosmology)

            bins, weights, aveshear = sr.calcZBinWeights(scaledZ, controller.pdz, estimators, zbins)

            clusters[key]['weights'] = weights
            clusters[key]['shears'] = aveshear

        data['clusters'] = clusters

    else:
        clusters = data['clusters']

    if 'median' not in data:
        allweights = np.vstack([clusters[tuple(item)]['weights'] for item in items])
        allshears = np.vstack([clusters[tuple(item)]['shears'] for item in items])

        median, sig1, sig2 = sr.calcBinDistro(zbins, allweights, allshears)

        data['median'] = median
        data['sig1'] = sig1
        data['sig2'] = sig2

    else:

        median = data['median']
        sig1 = data['sig1']
        sig2 = data['sig2']

    if doPlot:

        fig = pylab.figure()
        ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

        # 2-sigma band in blue underneath the 1-sigma band in red.
        ax.errorbar(bincenters, median, sig2, fmt='bo')
        ax.errorbar(bincenters, median, sig1, fmt='ro')

        xplot = np.arange(0., np.max(zbins), 0.01)
        ax.plot(xplot, sr.shearScaling(xplot), 'k-', linewidth=1.5)

        ax.set_xlabel('Scaled Redshift')
        ax.set_ylabel('Lensing Power')
        ax.set_title('All Cluster Stack -- Restricted to Fit Data')
        fig.savefig('notes/shearratio/stack_restricted_manypoints.pdf')

        return fig, data

    return data
# Example 8
def calcBootstrapCovar(bootstrap):
    """Compute stacked shear medians for one bootstrap realization.

    Loads the `bootstrap`-th bootstrap catalog of every cluster in
    'worklist', rescales shears onto a common redshift grid, draws a
    bootstrap sample over clusters, and records the stacked
    median/sigma bands in the returned dict.

    Parameters:
        bootstrap -- index of the bootstrap realization to load

    Returns:
        data -- dict with 'items', 'zbins', 'clusters', 'median',
                'sig1', 'sig2'.
    """

    sourcedir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08'
    bootdir = '/u/ki/dapple/ki06/bootstrap_2012-02-08'

    data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']

    if 'zbins' not in data:
        data['zbins'] = np.unique(np.hstack([np.linspace(0., 1., 3.), np.linspace(1., 5., 10.), np.linspace(5., 10., 5.)]))
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    if 'clusters' not in data:
        clusters = {}
        # NOTE: `filterband` renamed from `filter` to stop shadowing the builtin.
        for cluster, filterband, image in items:
            key = (cluster, filterband, image)
            clusters[key] = {}

            clusterdir='%s/%s' % (bootdir, cluster)

            i = bootstrap

            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions()
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                                                                 workdir = sourcedir, 
                         incatalog = '%s/bootstrap_%d.ml.cat' % (clusterdir, i),
                         cluster = cluster, filter = filterband, image = image)

            controller.load(options, args)

            # BUGFIX: cPickle.load(open(...)) leaked the file handle.
            summaryfile = open('%s/bootstrap_%d.ml.out.mass15mpc.mass.summary.pkl' % (clusterdir, i))
            try:
                stats = cPickle.load(summaryfile)
            finally:
                summaryfile.close()
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)

            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0)

            bins, weights, aveshear = sr.calcZBinWeights(scaledZ, controller.pdz, estimators, zbins)

            clusters[key]['weights'] = weights
            clusters[key]['shears'] = aveshear

        data['clusters'] = clusters

    else:
        clusters = data['clusters']

    # BUGFIX: the guard previously tested 'medians' (plural) but the code
    # stores 'median'; the mismatched key made the cache check inert.
    if 'median' not in data:

        # Resample clusters with replacement for the bootstrap covariance.
        clusterboot = np.random.randint(0, len(items), len(items))

        allweights = np.vstack([clusters[tuple(items[j])]['weights'] for j in clusterboot])
        allshears =  np.vstack([clusters[tuple(items[j])]['shears']  for j in clusterboot])

        median, sig1, sig2 = sr.calcBinDistro(zbins, allweights, allshears)

        data['median'] = median
        data['sig1'] = sig1
        data['sig2'] = sig2

    return data
# Example 9
def plotOneCluster(cluster, filter, image, workdir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08', 
                   outdir = '/u/ki/dapple/subaru/doug/publication/baseline_2012-05-17'.replace('2012-05-17', '2012-02-08'), data = None):
    """Plot the scaled shear profile of a single cluster.

    Loads the cluster's lensing catalog and median mass posterior,
    rescales the shear onto the scaled-redshift axis, bins it, and
    draws the binned medians with 1/2-sigma error bars against the
    theoretical shear scaling curve. Saves the figure under
    notes/shearratio/.

    Parameters:
        cluster, filter, image -- identify the catalog to load
        workdir -- directory holding the input catalogs
        outdir  -- directory holding the mass-posterior summary pickles
        data    -- optional cache dict reused across calls

    Returns:
        (fig, data)
    """

    if data is None:
        data = {}

    if 'controller' not in data:

        controller = driver.makeController()
        options, args = controller.modelbuilder.createOptions(zcut = None)
        options, args = controller.filehandler.createOptions(options = options, args = args, 
                                                         workdir = workdir, 
                                                         incatalog = '%s/%s.%s.%s.lensingbase.cat' % (workdir, cluster, filter, image),
                                                         cluster = cluster, filter = filter, image = image,
                                                         shapecut = True, 
                                                         redseqcat = '%s/%s.%s.%s.redsequence.cat' % (workdir, cluster, filter, image))

        controller.load(options, args)

        data['controller'] = controller

    else:

        controller = data['controller']

    if 'rs' not in data:

        # BUGFIX: cPickle.load(open(...)) leaked the file handle.
        summaryfile = open('%s/%s.%s.%s.out.mass15mpc.mass.summary.pkl' % (outdir, cluster, filter, image))
        try:
            stats = cPickle.load(summaryfile)
        finally:
            summaryfile.close()
        mass = stats['quantiles'][50]

        # Median-mass scale radius at c=4, mass defined inside 1.5 Mpc.
        rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)

        data['rs'] = rs

    else:

        rs = data['rs']

    if 'median' not in data:

        scaledZ, estimators = sr.scaleShear(controller, rs, 4.0)

        bins = np.unique(np.hstack([np.linspace(0., 1., 3.), np.linspace(1., np.max(scaledZ), 5.)]))

        scaledZbins, weights, aveEst = sr.calcZBinWeights(scaledZ, controller.pdz, estimators, bins)

        median, sig1, sig2 = sr.calcBinDistro(scaledZbins, weights, aveEst)

        data['bins'] = bins
        data['median'] = median
        data['sig1'] = sig1
        data['sig2'] = sig2

    else:

        bins = data['bins']
        median = data['median']
        sig1 = data['sig1']
        sig2 = data['sig2']

    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

    bincenters = (bins[1:] + bins[:-1])/2.

    # 2-sigma band in blue underneath the 1-sigma band in red.
    ax.errorbar(bincenters, median, sig2, fmt='bo')
    ax.errorbar(bincenters, median, sig1, fmt='ro')

    xplot = np.arange(0., np.max(bins), 0.01)
    ax.plot(xplot, sr.shearScaling(xplot), 'k-', linewidth=1.5)

    ax.set_xlabel('Scaled Redshift')
    ax.set_ylabel('Lensing Power')
    ax.set_title('%s %s %s' % (cluster, filter, image))
    fig.savefig('notes/shearratio/%s.%s.%s.pdf' % (cluster, filter, image))

    return fig, data
# Example 10
def stackClusters(data = None, cosmology = nfwutils.std_cosmology, outdir = '/u/ki/dapple/subaru/doug/publication/baseline_2012-05-17'):
    """Stack the scaled shear signal of all clusters (maxlike version).

    Loads every cluster in 'worklist', rescales its shear onto the
    scaled-redshift axis, bins all clusters on a common grid sized to
    the largest observed scaled redshift, then computes bootstrap
    maximum-likelihood bin estimates and plots them against the
    theoretical shear scaling curve.

    Parameters:
        data      -- optional cache dict; intermediate results are reused
                     from / stored into it so repeated calls are cheap
        cosmology -- cosmology passed through to the shear rescaling
        outdir    -- directory holding the mass-posterior summary pickles

    Returns:
        (fig, data)
    """

    workdir = '/u/ki/dapple/ki06/catalog_backup_2012-05-17'

    if data is None:
        data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']

    if 'clusters' not in data:
        clusters = {}
        # NOTE: `filterband` renamed from `filter` to stop shadowing the builtin.
        for cluster, filterband, image in items:
            key = (cluster, filterband, image)
            clusters[key] = {}
            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions(zcut= None)
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                             workdir = workdir, 
                             incatalog = '%s/%s.%s.%s.lensingbase.cat' % (workdir, cluster, filterband, image),
                             cluster = cluster, filter = filterband, image = image,
                             shapecut = True, 
                             redseqcat = '%s/%s.%s.%s.redsequence.cat' % (workdir, cluster, filterband, image))

            controller.load(options, args)

            # BUGFIX: cPickle.load(open(...)) leaked the file handle.
            summaryfile = open('%s/%s.%s.%s.out.mass15mpc.mass.summary.pkl' % (outdir, cluster, filterband, image))
            try:
                stats = cPickle.load(summaryfile)
            finally:
                summaryfile.close()
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)

            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0, cosmology = cosmology)

            clusters[key]['scaledZ'] = scaledZ
            clusters[key]['estimators'] = estimators
            clusters[key]['pdz'] = controller.pdz

        data['clusters'] = clusters

    else:
        clusters = data['clusters']

    # Largest scaled redshift over all clusters sets the top bin edge.
    maxScaledZ = -1
    for key in clusters.keys():
        localMax = np.max(clusters[key]['scaledZ'])
        maxScaledZ = max(localMax, maxScaledZ)

    if 'zbins' not in data:
        # Linear bins up to 1, log-spaced above; drop two sparse top bins.
        zbins = np.unique(np.hstack([np.linspace(0., 1., 3.), np.logspace(0.1, np.log10(maxScaledZ), 12.)]))
        data['zbins'] = np.hstack([zbins[:-3], zbins[-1]])
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    for key in clusters.keys():

        # BUGFIX: pdz was stored above but never forwarded; the sibling
        # routines call sr.calcZBinWeights(scaledZ, pdz, estimators, zbins).
        bins, weights, aveshear = sr.calcZBinWeights(clusters[key]['scaledZ'], clusters[key]['pdz'], clusters[key]['estimators'], zbins)

        clusters[key]['weights'] = weights
        clusters[key]['shears'] = aveshear

    if 'maxlike' not in data:
        allweights = np.vstack([clusters[tuple(item)]['weights'] for item in items])
        allshears = np.vstack([clusters[tuple(item)]['shears'] for item in items])

        pointest = sr.calcBinDistro(zbins, allweights, allshears)

        data['pointest'] = pointest

        maxlike, sig1, sig2, maxlike_ests = sr.bootstrapBinDistro(zbins, allweights, allshears)

        data['maxlike'] = maxlike
        data['sig1'] = sig1
        data['sig2'] = sig2
        data['maxlike_ests'] = maxlike_ests

    else:

        maxlike = data['maxlike']
        sig1 = data['sig1']
        sig2 = data['sig2']

    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

    # 2-sigma band in blue underneath the 1-sigma band in red.
    ax.errorbar(bincenters, maxlike, sig2, fmt='bo')
    ax.errorbar(bincenters, maxlike, sig1, fmt='ro')

    xplot = np.arange(0.01, np.max(zbins), 0.01)
    ax.plot(xplot, sr.shearScaling(xplot), 'k-', linewidth=1.5)

    # Raw strings so the LaTeX backslashes are unambiguous.
    ax.set_xlabel(r'$x = \omega_s/\omega_l$')
    ax.set_ylabel(r'$\gamma(z)/\gamma(\infty)$')
    ax.set_title('All Cluster Stack -- Maxlike Point Est')
    fig.savefig('notes/shearratio/stack_maxlike.pdf')

    return fig, data