Example #1
def clusterplot(data=None):
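    # Compare 5-filter BPZ photo-z's against cluster spectroscopic redshifts: collect
    # matched photo-z/spec-z pairs per cluster (keeping BPZ_ODDS > 0.5), stack them,
    # and draw a log-scaled hexbin plot saved to publication/clustercomp.pdf.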

    if data is None:
        data = {}

    if 'items' not in data:
        data['items'] = readtxtfile('worklist')

    items = data['items']

    if 'clusters' not in data:
        clusters = {}

        for cluster, filter, image in items:

            matchedfile = '/u/ki/dapple/subaru/%s/PHOTOMETRY_%s_aper/%s.matched.tab' % (
                cluster, filter, cluster)

            if not os.path.exists(matchedfile):
                continue

            bpz = ldac.openObjectFile(
                '/u/ki/dapple/ki06/catalog_backup_2012-05-17/%s.%s.bpz.tab' %
                (cluster, filter), 'STDTAB')
            zcat = ldac.openObjectFile(matchedfile, 'STDTAB')

            bpz = bpz.matchById(zcat, 'SeqNr_data')

            bpz = bpz.filter(bpz['BPZ_ODDS'] > 0.5)
            zcat = zcat.matchById(bpz, 'SeqNr', 'SeqNr_data')

            clusters[cluster] = np.array([bpz['BPZ_Z_B'], zcat['z_spec']])

        data['clusters'] = clusters

    clusters = data['clusters']

    if 'redshifts' not in data:

        data['redshifts'] = np.column_stack([x for x in clusters.itervalues()])

    redshifts = data['redshifts']

    fig = makeHexbin(redshifts[1, :],
                     redshifts[0, :],
                     xlabel='Cluster Spectroscopic $z$',
                     ylabel='5 Filter Photo-$z$',
                     gridsize=45,
                     bins='log')

    fig.axes[0].text(1.1, 0.05, '\emph{Log Color Scale}')

    fig.savefig('publication/clustercomp.pdf')

    return fig, data
Example #2
def readCCSummary(outdir, clusters):
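    # Read each '<outdir>/<cluster>.anja.mass.dat' file and return both a
    # {cluster: masses} dict and the same masses stacked into one array, in cluster order.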

    masses = {}

    for cluster in clusters:

        masses[cluster] = np.hstack(
            readtxtfile('%s/%s.anja.mass.dat' % (outdir, cluster)))

        print cluster, len(masses[cluster])

    return masses, np.array([masses[x] for x in clusters])
Example #3
def convertTxt2LDAC(file, keys, type):
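    # Build an LDAC 'OBJECTS' table from a plain-text table read with readtxtfile,
    # using the given column names (keys) and FITS column format codes (type).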

    rawfile = readtxtfile(file)

    cols = []
    for i, (key, fmt) in enumerate(zip(keys, type)):

        cols.append(pyfits.Column(name=key, format=fmt, array=rawfile[:, i]))

    hdu = pyfits.new_table(pyfits.ColDefs(cols))
    hdu.header.update('EXTNAME', 'OBJECTS')

    return ldac.LDACCat(hdu)
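
# Hypothetical usage sketch (the file name, column names, and 'J'/'E' FITS format
# codes below are illustrative; the input is assumed to be a two-column numeric table):
#   cat = convertTxt2LDAC('objects.txt', ['SeqNr', 'MAG_AUTO'], ['J', 'E'])
#   cat.saveas('objects.cat', clobber=True)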
Example #4
def publicationMLPullScript(data=None):
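    # Histogram the stacked per-cluster mass pulls (Delta/sigma) built from the ML
    # bootstraps and the baseline masses, overlay a unit Gaussian, and annotate the
    # sample mean and scatter; saved to publication/mlmass_pull.eps.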

    items = readtxtfile('worklist')
    clusters = [x[0] for x in items]

    if data is None:

        MLbootmasses, MLmask = readMLBootstraps(
            '/u/ki/dapple/ki06/bootstrap_2011-12-14', items)

        MLdat = readDougMasses(
            '/u/ki/dapple/subaru/doug/publication/baseline_2011-12-14')

        pulls = np.hstack([
            makePull(MLdat[tuple(x)][0], MLdat[tuple(x)][1],
                     MLbootmasses[x[0]], MLmask[x[0]]) for x in items
        ])

        data = pulls

    else:

        pulls = data

    fig = pylab.figure()

    ax = fig.add_axes([0.14, 0.14, 0.95 - 0.14, 0.95 - 0.14])

    ax.hist(pulls, bins=50, normed=True)

    ax.axvline(0.0, c='k', linewidth=2)

    gaussx = np.arange(-6, 6, 0.0002)
    gauss = np.exp(-0.5 * gaussx**2) / np.sqrt(2 * np.pi)

    ax.plot(gaussx, gauss, c='r', marker='None', linestyle='--', linewidth=1.5)

    pullMean = np.mean(pulls)
    pullStd = np.std(pulls)

    ax.text(-5.5, 0.4, 'P(z)', fontsize=16)
    ax.text(2.5, 0.4, '$\mu = %1.3f$' % pullMean, fontsize=14)
    ax.text(2.5, 0.38, '$\sigma = %1.3f$' % pullStd, fontsize=14)

    ax.set_xlabel('Stacked $\Delta/\sigma$ for %d Clusters' % len(clusters))
    ax.set_ylabel('Probability ($\Delta/\sigma$)')

    fig.savefig('publication/mlmass_pull.eps')

    return fig, data
Example #5
def galdensity(data = None):
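    # Plot the input galaxy density per cluster: counts from galaxy_counts_pzmethod.pkl
    # divided by the annulus area between 0.75 and 3.0 Mpc (converted to square
    # arcminutes via the angular-diameter distance), as a function of cluster redshift.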

    if data is None:
        data = {}

    
    if 'ngals' not in data:

        ngals = cPickle.load(open('galaxy_counts_pzmethod.pkl', 'rb'))
        data['ngals'] = ngals

        items = [tuple(x) for x in readtxtfile('worklist')]
        clusters = [x[0] for x in items]

        redshifts = cm.readClusterRedshifts()
        properz = np.array([redshifts[x] for x in clusters])
        data['properz'] = properz

        Dl = np.array([nfwutils.angulardist(z) for z in properz])
        data['Dl'] = Dl

        inner_rad = np.arctan2(0.75, Dl) * (180./np.pi) * 60
        outer_rad = np.arctan2(3., Dl) * (180 / np.pi) * 60
        area = np.pi*(outer_rad**2 - inner_rad**2)
        data['area'] = area

        propercounts = np.array([ngals[x] for x in items])
        
        density = propercounts / area
        data['density'] = density

    else:

        properz = data['properz']
        density = data['density']
    

    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])
    ax.plot(properz, density, 'bo')
    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel('Input Galaxy Density')

    return fig, data
Example #6
def CCSimBiasPlotScript(data=None):
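    # Plot the fractional mass bias, (mean - true)/true, of the color-cut simulation
    # masses against cluster redshift, with a shaded band marking the quoted mean bias
    # and its uncertainty.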

    if data is None:

        data = readtxtfile(
            '/nfs/slac/g/ki/ki06/anja/SUBARU/cosmos_cats/simulations/publication/highsn/cluster2/cc_masses/masses_concat_anja.dat'
        )

    # ${cluster} ${zcluster} ${truemass} ${meanmass} ${emean} ${beta} ${beta2} ${nshells} ${nobjects}

    clusters = [x[0] for x in data]
    redshifts = np.array([float(x[1]) for x in data])
    truemass = np.array([float(x[2]) for x in data])
    meanmass = np.array([float(x[3]) for x in data])
    errmass = np.array([float(x[4]) for x in data])

    fig = pylab.figure()
    ax = fig.add_axes([0.14, 0.14, 0.95 - 0.14, 0.95 - 0.14])

    fracbias = (meanmass - truemass) / truemass
    fracerr = errmass / truemass

    ax.axhline(0.0, c='k', linewidth=1.5)

    mean = 0.007
    err = 0.0022
    fillx = np.array([0.16, 0.72])
    ax.fill_between(fillx, mean - err, mean + err, facecolor='r', alpha=0.1)
    ax.axhline(mean, c='k', linestyle='--', alpha=0.5)

    ax.errorbar(redshifts, fracbias, fracerr, fmt='bo')

    ax.set_xlim(0.16, 0.72)
    ax.set_ylim(-0.08, 0.08)

    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel(r'Fractional Mass Bias within 1.5 Mpc')

    ax.text(0.2, 0.06, r'Color Cuts', fontsize=16)

    fig.savefig('publication/clustersims_cc_compare.eps')

    return fig, data
Example #7
def ML5v6filterCompareScript(data=None):
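    # Compare the bootstrap-mean fractional mass bias of the 5-filter (BVr+i+z+) and
    # 6-filter (uBVr+i+z+) simulation runs as a function of cluster redshift.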

    if data is None:

        worklist = readtxtfile('simclusterlist')
        data = load5v6simdata(worklist)

    Bfracbias = data[0]
    Afracbias = data[1]
    properredshifts = data[2]

    fig = pylab.figure()
    ax = fig.add_axes([0.14, 0.12, 0.95 - 0.14, 0.95 - 0.12])

    Bmean, Berr = ss.bootstrapMean(Bfracbias)
    Amean, Aerr = ss.bootstrapMean(Afracbias)

    ax.errorbar(properredshifts - 0.0025,
                Bmean,
                Berr,
                fmt='bo',
                label=r'$BVr^+i^+z^+$')
    ax.errorbar(properredshifts + 0.0025,
                Amean,
                Aerr,
                fmt='rs',
                label=r'$uBVr^+i^+z^+$')

    ax.axhline(0.0, c='k')

    ax.set_xlim(0.16, 0.72)
    ax.set_ylim(-0.15, 0.08)

    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel(r'Fractional Mass Bias within 1.5 Mpc')

    ax.legend(loc='lower left', numpoints=1, ncol=1)

    ax.text(0.2, 0.06, r'10\% Contamination', fontsize=16)

    fig.savefig('publication/clustersims_bvriz_aper_compare.eps')

    return fig, data
Example #8
def prepData(cluster, filter):
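    # Load a cluster's filtered background-galaxy catalog, apply photo-z quality and
    # magnitude cuts, and return projected radii from the (5000, 5000) pixel center,
    # the coadd area read from area.dat, and selected photo-z and magnitude columns.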

    cat = ldac.openObjectFile('%s/%s/LENSING/%s_fbg.filtered.cat' %
                              (subarudir, cluster, cluster))

    cat = cat.filter(
        numpy.logical_and(
            numpy.logical_and(cat['BPZ_Z_B_MAX-MIN_Z'] < 0.8,
                              cat['BPZ_ODDS'] > 0.95),
            cat['MAG_AUTO'] < 23.85))

    #    cat = cat.filter(numpy.logical_and(cat['BPZ_Z_B_MAX-MIN_Z'] < 0.8,
    #                                       cat['BPZ_ODDS'] > 0.95))

    coords = numpy.column_stack([cat['Xpos'], cat['Ypos']])
    dX = coords - numpy.array([5000, 5000])
    dR = numpy.sqrt(dX[:, 0]**2 + dX[:, 1]**2)

    area = du.readtxtfile('%s/%s/%s/SCIENCE/coadd_%s_good/area.dat' %
                          (subarudir, cluster, filter, cluster))

    return (dR, area, cat['Z_BEST'], cat['BPZ_Z_B_MIN'],
            cat['BPZ_Z_B_MAX'], cat['MAG_AUTO'])
Example #9
def ML5filterContamCompareScript(data=None):
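    # Compare the fractional mass bias of the BVRIZ simulations at 0%, 10%, and 20%
    # contamination, plotted against cluster redshift.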

    if data is None:
        data = {}

    if 'worklist' not in data:
        data['worklist'] = readtxtfile('simclusterlist')
        data['clusters'] = [x[0] for x in data['worklist']]

    worklist = data['worklist']
    clusters = data['clusters']

    if 'redshifts' not in data:
        data['redshifts'] = cm.readClusterRedshifts()
        redshifts = data['redshifts']
        data['properredshifts'] = np.array([redshifts[x] for x in clusters])

    redshifts = data['redshifts']
    properredshifts = data['properredshifts']

    if 'fracbiases' not in data:
        subdirs = ['%sBVRIZ' % x for x in ['', 'contam0p10/', 'contam0p20/']]

        data['fracbiases'] = ss.processFracBiasData(
            '/u/ki/dapple/nfs12/cosmos/simulations/clusters_2012-05-17-highdensity',
            subdirs, clusters, redshifts)
    fracbiases = data['fracbiases']

    fig = publicationContamComparePlot(fracbiases[0], fracbiases[1],
                                       fracbiases[2], properredshifts)

    ax = fig.axes[0]
    ax.text(0.2, 0.06, r'$B_{\mathrm J}V_{\mathrm J}r^+i^+z^+$', fontsize=16)

    fig.savefig('publication/clustersims_bvriz_contam_compare.eps')

    return fig, data
Example #10
def applyFilter(filterfile):
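    # Compute a synthetic magnitude for every template spectrum through this filter:
    # resample the filter curve onto each spectrum's wavelength grid, integrate the
    # flux-weighted response, and normalize by the pivot-wavelength integral.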

    filter = readtxtfile(filterfile)[:, :2]
    step = filter[1, 0] - filter[0, 0]

    #convert photon response filters to flux response filters
    filterSpline = interp.interp1d(filter[:, 0],
                                   filter[:, 1],
                                   bounds_error=False,
                                   fill_value=0.)

    spec_mags = []
    for spec in spectra:
        specStep = spec[1, 0] - spec[0, 0]
        resampFilter = filterSpline(spec[:, 0])

        logEff = log10(sum(specStep * resampFilter * spec[:, 0] * spec[:, 1]))
        logNorm = log10(
            sum(resampFilter * c * specStep /
                spec[:, 0]))  #pivot wavelength, with cancelation of norm
        mag = 2.5 * (logNorm - logEff)
        spec_mags.append(mag)

    return spec_mags
Example #11
def calcBootstrapCovar(bootstrap):
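    # For a single bootstrap realization, rebuild each cluster's scaled-redshift bin
    # weights and average shears from the bootstrap ML catalogs, then resample the
    # cluster list with replacement and store the median binned shear signal and its
    # sig1/sig2 ranges in the returned data dict.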

    sourcedir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08'
    bootdir = '/u/ki/dapple/ki06/bootstrap_2012-02-08'


    data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']

    if 'zbins' not in data:
        data['zbins'] = np.unique(np.hstack([np.linspace(0., 1., 3), np.linspace(1., 5., 10), np.linspace(5., 10., 5)]))
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    if 'clusters' not in data:
        clusters = {}
        for cluster, filter, image in items:
            key = (cluster, filter, image)
            clusters[key] = {}

            clusterdir='%s/%s' % (bootdir, cluster)

            i = bootstrap

            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions()
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                                                                 workdir = sourcedir, 
                         incatalog = '%s/bootstrap_%d.ml.cat' % (clusterdir, i),
                         cluster = cluster, filter = filter, image = image)

            controller.load(options, args)

            stats = cPickle.load(open('%s/bootstrap_%d.ml.out.mass15mpc.mass.summary.pkl' % (clusterdir, i)))
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)

            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0)

            bins, weights, aveshear = sr.calcZBinWeights(scaledZ, controller.pdz, estimators, zbins)

            clusters[key]['weights'] = weights
            clusters[key]['shears'] = aveshear



        data['clusters'] = clusters

    else:
        clusters = data['clusters']

    
    if 'median' not in data:

        clusterboot = np.random.randint(0, len(items), len(items))
        
        allweights = np.vstack([clusters[tuple(items[j])]['weights'] for j in clusterboot])
        allshears =  np.vstack([clusters[tuple(items[j])]['shears']  for j in clusterboot])

        median, sig1, sig2 = sr.calcBinDistro(zbins, allweights, allshears)

        data['median'] = median
        data['sig1'] = sig1
        data['sig2'] = sig2


    return data
Example #12
def MLUbandRatioScript(data=None):
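    # Compare the ratio of masses measured without vs. with U-band, both in the
    # contaminated simulations and in the data bootstraps, as a function of cluster
    # redshift; saved to publication/dropU_comp.eps.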

    simlist = readtxtfile('simclusterlist')
    noUlist = readtxtfile('noUlist')

    worklist = [x for x in simlist if x in noUlist]
    del worklist[-1]
    clusters = [x[0] for x in worklist]

    if data is None:

        redshifts = cm.readClusterRedshifts()
        properredshifts = np.array([redshifts[x] for x in clusters])

        noUsim_masses, noUsim_errs, noUsim_grid, scale_radii = ss.readMLMasses(
            '/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/cluster3',
            'contam0p10/BVRIZ', clusters)

        Usim_masses, Usim_errs, Usim_grid, scale_radii = ss.readMLMasses(
            '/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/cluster3',
            'contam0p10/APER', clusters)

        noUmasses, noUmask = cm.readMLBootstraps(
            '/u/ki/dapple/ki06/bootstrap_2011-12-14', worklist, np.arange(100))
        Umasses, Umask = cm.readMLBootstraps(
            '/u/ki/dapple/ki06/bootstrap_U_2012-02-03', worklist,
            np.arange(100))

        data = [
            noUsim_grid, Usim_grid, properredshifts, noUmasses, noUmask,
            Umasses, Umask
        ]

    else:

        noUsim_grid = data[0]
        Usim_grid = data[1]
        properredshifts = data[2]
        noUmasses = data[3]
        noUmask = data[4]
        Umasses = data[5]
        Umask = data[6]

    simRatio = noUsim_grid / Usim_grid
    simMean, simErr = ss.bootstrapMean(simRatio.T)

    dataRatios = cm.makeRatios(Umasses, Umask, noUmasses, noUmask)
    dataMeans, dataErrs = cm.bootstrapMeans(dataRatios, clusters)

    fig = pylab.figure()
    ax = fig.add_axes([0.14, 0.12, 0.95 - 0.14, 0.95 - 0.12])

    ax.errorbar(properredshifts + 0.0025,
                dataMeans,
                dataErrs,
                fmt='rs',
                label='Data')
    ax.errorbar(properredshifts - 0.0025,
                simMean,
                simErr,
                fmt='bo',
                label='Simulations')

    ax.axhline(1.0, c='k', linewidth=1.5)

    ax.set_xlim(0.16, 0.72)
    ax.set_ylim(0.85, 1.35)

    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel('Mass without U-band / Mass with U-band')

    ax.legend(loc='upper left', numpoints=1)

    fig.savefig('publication/dropU_comp.eps')

    return fig, data
Example #13
def publicationMLCCBootCompScript(worklist='worklist',
                                  cosmology=None,
                                  data=None):
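    # Plot the per-cluster median CC/P(z) mass ratio against redshift, with shaded
    # bands for a quoted sample-average ratio, and optionally circle the clusters that
    # appear in the 'cosmology' worklist.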

    items = readtxtfile(worklist)
    clusters = [x[0] for x in items]
    redshifts = readClusterRedshifts()
    properredshifts = np.array([redshifts[x] for x in clusters])

    if data is None:

        MLmasses, MLmask = readMLBootstraps(
            '/u/ki/dapple/ki06/bootstrap_2012-05-17', items, np.arange(0, 200))

        CCmasses, CCmask = readCCSummary(
            '/u/ki/dapple/ki06/bootstrap_2012-05-17', clusters,
            np.arange(0, 200))

        ratios = makeRatios(MLmasses, MLmask, CCmasses, CCmask)

        medians, errs = bootstrapMedians(ratios, clusters)

        #        fit_ratio = np.hstack([pymc.database.pickle.load('ml_cc_rlog.out.%d' % i).trace('m', -1)[25000:] for i in range(1,6)])

        data = [medians, errs, properredshifts]

    else:

        medians = data[0]
        errs = data[1]
        properredshifts = data[2]
#        fit_ratio = data[3]

    if cosmology is not None:
        cosmoitems = readtxtfile(cosmology)
        cosmoclusters = [x[0] for x in cosmoitems]
        cosmomask = []
        for x in clusters:
            if x in cosmoclusters:
                cosmomask.append(1)
            else:
                cosmomask.append(0)
        cosmomask = np.array(cosmomask) == 1
        print cosmomask

    fig = pylab.figure()

    try:

        ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

        ax.axhline(1.0, c='k', linewidth=1.25)

        #        ratio, ratioerr = sp.ConfidenceRegion(fit_ratio)
        ratio = 0.998
        ratioerr = [0.042, 0.044]  #m, p
        ratioerr2 = [0.081, 0.085]  #m,p
        fillx = [0.16, 0.72]

        ax.fill_between(fillx,
                        ratio - ratioerr2[0],
                        ratio + ratioerr2[1],
                        facecolor=(1, .753, 0))
        ax.fill_between(fillx,
                        ratio - ratioerr[0],
                        ratio + ratioerr[1],
                        facecolor=(1, 0.4, 0))
        ax.axhline(ratio, c='k', linestyle='--')

        if cosmology:
            ratio = 0.965
            ratioerr = [0.05, 0.06]  #m, p
            fillx = [0.17, 0.46]

            ax.axhline(ratio, c='k', linestyle=':')
            ax.fill_between(fillx,
                            ratio - ratioerr[0],
                            ratio + ratioerr[1],
                            facecolor='c',
                            alpha=0.3)

            ax.plot(properredshifts[cosmomask],
                    medians[cosmomask],
                    'ko',
                    markersize=10,
                    markerfacecolor='None',
                    markeredgecolor='k')

        ax.errorbar(properredshifts, medians, errs, fmt='ko')

        ax.set_yscale('log')

        ax.set_xlim(0.16, 0.72)
        ax.set_ylim(0.45, 3.5)

        ax.set_yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 2.0, 3.0])
        ax.set_yticklabels(
            ['%2.1f' % x for x in [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 2.0, 3.0]])

        ax.set_yticks([1.25, 1.5, 1.75, 2.25, 2.5, 2.75], minor=True)

        ax.set_xlabel('Cluster Redshift')
        ax.set_ylabel('CC Mass / P(z) Mass')

        fig.savefig('publication/%s.eps' % worklist)

    finally:

        return fig, data
Example #14
def measureMLCcBootIS(sample='worklist', data=None, diffR=None):
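    # Measure the mean offset and intrinsic scatter between the ML and CC bootstrap
    # masses: restrict both to the common good bootstraps, evaluate the
    # isg.intrinsicScatter grid, and print/plot the marginal 1D distributions of the
    # mean and scatter.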

    if data is None:
        data = {}

    if 'items' not in data:
        items = readtxtfile(sample)
        clusters = [x[0] for x in items]
        redshifts = readClusterRedshifts()
        properredshifts = np.array([redshifts[x] for x in clusters])

        data['items'] = items
        data['clusters'] = clusters
        data['redshifts'] = redshifts
        data['properredshifts'] = properredshifts
    else:
        items = data['items']
        clusters = data['clusters']
        redshifts = data['redshifts']
        properredshifts = data['properredshifts']

    if 'MLmasses' not in data or 'CCmasses' not in data:

        if diffR is None:

            if 'MLmasses' not in data:

                MLmasses, MLmask = readMLBootstraps(
                    '/u/ki/dapple/ki06/bootstrap_2012-05-17', items,
                    np.arange(0, 200))
                data['MLmasses'] = MLmasses
                data['MLmask'] = MLmask
            else:
                MLmasses, MLmask = data['MLmasses'], data['MLmask']

            if 'CCmasses' not in data:
                CCmasses, CCmask = readCCSummary(
                    '/u/ki/dapple/ki06/bootstrap_2012-05-17', clusters,
                    np.arange(0, 200))
                data['CCmasses'] = CCmasses
                data['CCmask'] = CCmask
            else:
                CCmasses, CCmask = data['CCmasses'], data['CCmask']

        else:

            print 'Using Alternative diffR'

            if 'MLmasses' not in data:

                MLmasses, MLmask = readMLBootstraps_diffR(
                    '/u/ki/dapple/ki06/bootstrap_2012-05-17', items,
                    np.arange(0, 200), diffR, redshifts)
                data['MLmasses'] = MLmasses
                data['MLmask'] = MLmask
            else:
                MLmasses, MLmask = data['MLmasses'], data['MLmask']

            if 'CCmasses' not in data:
                CCmasses, CCmask = readCCSummary_diffR(
                    '/u/ki/dapple/ki06/bootstrap_2012-05-17', clusters,
                    np.arange(0, 200), diffR, redshifts)
                data['CCmasses'] = CCmasses
                data['CCmask'] = CCmask
            else:
                CCmasses, CCmask = data['CCmasses'], data['CCmask']

        MLreduced = {}
        CCreduced = {}
        for key in MLmasses.keys():
            totalmask = np.logical_and(MLmask[key], CCmask[key])
            MLreduced[key] = MLmasses[key][totalmask]
            CCreduced[key] = CCmasses[key][totalmask]

        data['MLreduced'] = MLreduced
        data['CCreduced'] = CCreduced

    else:
        MLreduced, CCreduced = data['MLreduced'], data['CCreduced']

    grid, means, scatters = isg.intrinsicScatter(
        MLreduced,
        CCreduced,
        means=np.arange(0.5, 1.5, 0.002),
        scatters=np.arange(0.02, 0.2, 0.005))

    data['grid'] = grid
    data['means'] = means
    data['scatters'] = scatters

    figs = []

    print
    print
    print '-----'
    print 'var\tmode\t68% +\t-\t95% +\t-'
    print '-----'

    if 'meandist' not in data:

        means = data['means']
        scatters = data['scatters']

        mode, (r68, r95) = isg.getdist_1d_hist(means[0],
                                               means[1],
                                               levels=[0.68, 0.95])
        data['meandist'] = (mode, r68, r95)

        mode, (r68, r95) = isg.getdist_1d_hist(scatters[0],
                                               scatters[1],
                                               levels=[0.68, 0.95])
        data['scatterdist'] = (mode, r68, r95)

    for varname in 'mean scatter'.split():

        mode, r68, r95 = data['%sdist' % varname]

        print mode, r68, r95

        print '%s\t%2.4f\t+%2.4f\t-%2.4f\t+%2.4f\t-%2.4f' % (
            varname, mode, r68[0][1] - mode, mode - r68[0][0],
            r95[0][1] - mode, mode - r95[0][0])

        x, prob = data['%ss' % varname]
        fig = isgp.plotdist_1d_hist(x, prob, mode, [r68[0], r95[0]])
        ax = fig.axes[0]
        ax.set_title(varname)

        figs.append(fig)
        fig.show()

    return figs, data
Example #15
#!/usr/bin/env python

import ldac, dappleutils, sys, os, pyfits
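# Keep only the rows of a full LDAC catalog whose SeqNr appears in a plain-text ID list
# (argv[1] = ID list, argv[2] = catalog); the matched rows are written to
# <catalog base>.filtered<ext>.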

filteredIDs = dappleutils.readtxtfile(sys.argv[1])

filteredIDcat = ldac.LDACCat(
    pyfits.new_table(
        pyfits.ColDefs(
            [pyfits.Column(name='SeqNr', format='J', array=filteredIDs[:, 0])])))

fullCat = ldac.openObjectFile(sys.argv[2])

filteredCat = dappleutils.matchById(filteredIDcat, fullCat)

base, ext = os.path.splitext(sys.argv[2])

outputname = '%s.filtered%s' % (base, ext)

filteredCat.saveas(outputname, clobber=True)
Example #16
#!/usr/bin/env python

import sys, glob, pyfits, os.path
from numpy import *
import scipy.interpolate.interpolate as interp
from dappleutils import readtxtfile
from optparse import OptionParser

c = 2.99792458e18  #Angstroms/s

spectrafiles = glob.glob('pickles/*.dat')
spectra = [readtxtfile(s)[:, :2] for s in spectrafiles]
nspectra = len(spectra)


def applyFilter(filterfile):

    filter = readtxtfile(filterfile)[:, :2]
    step = filter[1, 0] - filter[0, 0]

    #convert photon response filters to flux response filters
    filterSpline = interp.interp1d(filter[:, 0],
                                   filter[:, 1],
                                   bounds_error=False,
                                   fill_value=0.)

    spec_mags = []
    for spec in spectra:
        specStep = spec[1, 0] - spec[0, 0]
        resampFilter = filterSpline(spec[:, 0])

        logEff = log10(sum(specStep * resampFilter * spec[:, 0] * spec[:, 1]))
        logNorm = log10(
            sum(resampFilter * c * specStep /
                spec[:, 0]))  #pivot wavelength, with cancelation of norm
        mag = 2.5 * (logNorm - logEff)
        spec_mags.append(mag)

    return spec_mags
Example #17
def MLPointEstScript(data=None):
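    # Plot the bootstrap-mean fractional bias of the BVRIZ photo-z point-estimate
    # masses in the simulations, relative to the true NFW mass inside 1.5 Mpc.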

    if data is None:

        worklist = readtxtfile('simclusterlist')
        clusters = [x[0] for x in worklist]
        redshifts = cm.readClusterRedshifts()
        properredshifts = np.array([redshifts[x] for x in clusters])

        subdirs = ['contam0p10/BVRIZ']

        #        MLfracbias = ss.processFracBiasData('/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/cluster3',
        #                                            subdirs, clusters, redshifts)[0]
        #
        #
        #        Apointmass, Apointgrid, scale_radii = ss.readPointMasses('/u/ki/dapple/nfs12/cosmos/simulations/publication/highsn/cluster3', 'contam0p10/newman/APER', clusters)
        Bpointmass, Bpointgrid, scale_radii = ss.readPointMasses(
            '/u/ki/dapple/nfs12/cosmos/simulations/clusters_2012-05-17',
            'contam0p10/newman/BVRIZ', clusters)

        truemasses = [
            nfwutils.massInsideR(scale_radii[x], 4., redshifts[x], 1.5)
            for x in clusters
        ]

        #        Apointfracbias = ss.calcFracBias(Apointgrid, truemasses)
        Bpointfracbias = ss.calcFracBias(Bpointgrid, truemasses)

        data = [None, Bpointfracbias, properredshifts]

    else:

        #        Apointfracbias = data[0]
        Bpointfracbias = data[1]
        properredshifts = data[2]

    fig = pylab.figure()

    try:

        ax = fig.add_axes([0.15, 0.12, 0.95 - 0.15, 0.95 - 0.12])

        ax.axhline(0.0, c='k', linewidth=1.25)

        #        Apointmean, Apointerr = ss.bootstrapMean(Apointfracbias)
        Bpointmean, Bpointerr = ss.bootstrapMean(Bpointfracbias)

        ax.errorbar(properredshifts,
                    Bpointmean,
                    Bpointerr,
                    fmt='bo',
                    label=r'$BVr^+i^+z^+$')
        #        ax.errorbar(properredshifts+0.0025, Apointmean, Apointerr, fmt='rs', label=r'$uBVr^+i^+z^+$')

        ax.text(0.166, 0.135, r'$BVr^+i^+z^+$ Photo-Z Point Est', fontsize=16)

        ax.set_xlim(0.16, 0.72)
        ax.set_ylim(-0.05, 0.15)

        ax.set_xlabel('Cluster Redshift', fontsize=16)
        ax.set_ylabel(r'Fractional Mass Bias within 1.5 Mpc')

        #        ax.legend(loc='lower right', numpoints = 1, ncol=2)

        fig.savefig('publication/clustersims_pointest_compare.eps')

    finally:

        return fig, data
Example #18
def precisionZ(data = None):
    #mass precision as a function of redshift

    if data is None:
        data = {}

    if 'fracerrs' not in data:

        items = [tuple(x) for x in readtxtfile('worklist')]


        allmasses = cm.readDougMasses('/u/ki/dapple/subaru/doug/publication/baseline_2012-05-17')

        redshifts = cm.readClusterRedshifts()

        clusters = [x[0] for x in items]

        properz = np.array([redshifts[x] for x in clusters])

        masses, errs = cm.constructMassArray(allmasses, items)


        fracerrs = errs / masses

        aveerrs = np.mean(fracerrs, axis=0)

        data['aveerrs'] = aveerrs
        data['properz'] = properz

        ccitems = [tuple(x) for x in readtxtfile('referenceset')]
        
        ccmasses = cm.readAnjaMasses()
        clusters = [x[0] for x in ccitems]
        ccproperz = np.array([redshifts[x] for x in clusters])
        data['ccproperz'] = ccproperz
        
        masses, errs = cm.constructMassArray(ccmasses, ccitems)
        
        fracerrs = errs/masses
        
        ccaveerrs = np.mean(fracerrs, axis=0)

        data['ccaveerrs'] = ccaveerrs
                   

    else:

        aveerrs = data['aveerrs']
        properz = data['properz']
        
        ccaveerrs = data['ccaveerrs']
        ccproperz = data['ccproperz']

    
    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95-0.12, 0.95-0.12])
    ax.plot(ccproperz, ccaveerrs, 'bo', label = 'Color-Cut', mfc = 'None', mew = 1.0, mec='b')
    ax.plot(properz, aveerrs, 'rD', label = 'P($z$)')
    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel('Fractional Statistical Uncertainty M(r$<$1.5Mpc)')
    ax.set_xlim([0.14, 0.72])
    ax.legend(loc='upper left', numpoints = 1)

    fig.savefig('publication/aveerr_redshift.eps')
    

    return fig, data
Example #19
def lostgals(data = None):
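    # Measure how much of each cluster's lensing catalog the default photo-z cuts
    # discard: compare counts under the standard cuts to counts with the deltaz95/zb
    # limits effectively disabled, and plot the discarded fraction against redshift.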

    if data is None:

        data = {}

    items = readtxtfile('worklist')
    del items[-1]
    clusters = [x[0] for x in items]


    if 'properz' not in data:

        redshifts = cm.readClusterRedshifts()
        properz = np.array([redshifts[x] for x in clusters])
        data['properz'] = properz
    else:
        properz = data['properz']

    if 'properbase' not in data:

        basecuts = {}
        for cluster, filter, image in items:

            controller = driver.makeController()

            options, args = controller.modelbuilder.createOptions()
            options, args = controller.filehandler.createOptions(options = options, args = args,
                                                     workdir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08',
                                                     incatalog = '/u/ki/dapple/ki06/catalog_backup_2012-02-08/%s.%s.%s.lensingbase.cat' % (cluster, filter, image),
                                                     cluster = cluster, filter = filter, image = image,
                                                     redseqcat = '/u/ki/dapple/ki06/catalog_backup_2012-02-08/%s.%s.%s.redsequence.cat' % (cluster, filter, image), shapecut = True)

            controller.load(options, args)

            basecuts[cluster] = controller.ngalaxies
        data['basecuts'] = basecuts
        properbase = np.array([basecuts[x[0]] for x in items])
        data['properbase'] = properbase

    else:

        properbase = data['properbase']

    if 'properloose' not in data:

        loosecuts = {}
        for cluster, filter, image in items:

            controller = driver.makeController()

            options, args = controller.modelbuilder.createOptions(deltaz95high = 9999, zbhigh = 9999)
            options, args = controller.filehandler.createOptions(options = options, args = args,
                                                     workdir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08',
                                                     incatalog = '/u/ki/dapple/ki06/catalog_backup_2012-02-08/%s.%s.%s.lensingbase.cat' % (cluster, filter, image),
                                                     cluster = cluster, filter = filter, image = image,
                                                     redseqcat = '/u/ki/dapple/ki06/catalog_backup_2012-02-08/%s.%s.%s.redsequence.cat' % (cluster, filter, image), shapecut = True)

            controller.load(options, args)

            loosecuts[cluster] = controller.ngalaxies
        data['loosecuts'] = loosecuts
        properloose = np.array([loosecuts[x[0]] for x in items])
        data['properloose'] = properloose

    else:
        
        properloose = data['properloose']

    if 'ratio' not in data:

        ratio = 1 - (properbase.astype('float64') / properloose)
        data['ratio'] = ratio

    else:

        ratio = data['ratio']

    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])
    ax.plot(properz, ratio, 'bo')
    ax.set_xlim([0.16, 0.72])

    ax.set_xlabel('Cluster Redshift')
    ax.set_ylabel('Fraction of Catalog Discarded')

    return fig, data
Example #20
def stackClusters(data = None, cosmology = nfwutils.std_cosmology, outdir = '/u/ki/dapple/subaru/doug/publication/baseline_2012-05-17'):
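    # Stack all clusters in scaled-redshift space: load each lensing catalog, compute
    # NFW-scaled shear estimators per galaxy, bin them in scaled redshift, and plot
    # the bootstrap maximum-likelihood shear ratio per bin against the theoretical
    # sr.shearScaling curve.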

    workdir = '/u/ki/dapple/ki06/catalog_backup_2012-05-17'

    if data is None:
        data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']


    if 'clusters' not in data:
        clusters = {}
        for cluster, filter, image in items:
            key = (cluster, filter, image)
            clusters[key] = {}
            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions(zcut= None)
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                             workdir = workdir, 
                             incatalog = '%s/%s.%s.%s.lensingbase.cat' % (workdir, cluster, filter, image),
                             cluster = cluster, filter = filter, image = image,
                             shapecut = True, 
                             redseqcat = '%s/%s.%s.%s.redsequence.cat' % (workdir, cluster, filter, image))

            controller.load(options, args)

            stats = cPickle.load(open('%s/%s.%s.%s.out.mass15mpc.mass.summary.pkl' % (outdir, cluster, filter, image)))
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)
            
            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0, cosmology = cosmology)

            clusters[key]['scaledZ'] = scaledZ
            clusters[key]['estimators'] = estimators
            clusters[key]['pdz'] = controller.pdz

        data['clusters'] = clusters

    else:
        clusters = data['clusters']



    maxScaledZ = -1
    for key in clusters.keys():
        localMax = np.max(clusters[key]['scaledZ'])
        maxScaledZ = max(localMax, maxScaledZ)


    if 'zbins' not in data:
        zbins = np.unique(np.hstack([np.linspace(0., 1., 3), np.logspace(0.1, np.log10(maxScaledZ), 12)]))
        data['zbins'] = np.hstack([zbins[:-3], zbins[-1]])
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    for key in clusters.keys():

        bins, weights, aveshear = sr.calcZBinWeights(clusters[key]['scaledZ'], clusters[key]['estimators'], zbins)

        clusters[key]['weights'] = weights
        clusters[key]['shears'] = aveshear


    
    if 'maxlike' not in data:
        allweights = np.vstack([clusters[tuple(item)]['weights'] for item in items])
        allshears = np.vstack([clusters[tuple(item)]['shears'] for item in items])



        pointest = sr.calcBinDistro(zbins, allweights, allshears)


        data['pointest'] = pointest


        maxlike, sig1, sig2, maxlike_ests = sr.bootstrapBinDistro(zbins, allweights, allshears)

        data['maxlike'] = maxlike
        data['sig1'] = sig1
        data['sig2'] = sig2
        data['maxlike_ests'] = maxlike_ests

    else:
        
        maxlike = data['maxlike']
        sig1 = data['sig1']
        sig2 = data['sig2']
        


    fig = pylab.figure()
    ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

    ax.errorbar(bincenters, maxlike, sig2, fmt='bo')
    ax.errorbar(bincenters, maxlike, sig1, fmt='ro')

    xplot = np.arange(0.01, np.max(zbins), 0.01)
    ax.plot(xplot, sr.shearScaling(xplot), 'k-', linewidth=1.5)

    ax.set_xlabel('$x = \omega_s/\omega_l$')
    ax.set_ylabel('$\gamma(z)/\gamma(\infty)$')
    ax.set_title('All Cluster Stack -- Maxlike Point Est')
    fig.savefig('notes/shearratio/stack_maxlike.pdf')

    return fig, data
Example #21
#!/usr/bin/env python
####################

import sys
import numpy as np
import compare_masses as cm, intrinsicscatter as isc
import scatter_sims as ss, nfwutils
from dappleutils import readtxtfile
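# Assemble inputs for an intrinsic-scatter fit on the simulated clusters: the true NFW
# mass inside 1.5 Mpc for each cluster (x), the recovered ML masses (y), and their
# errors (yerr), stacked across the cluster list.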

workdir = sys.argv[1]
subdir = sys.argv[2]
outfile = sys.argv[3]
nsamples = int(sys.argv[4])

items = readtxtfile('simclusterlist')
clusters = [x[0] for x in items]

redshifts = cm.readClusterRedshifts()
properredshifts = np.array([redshifts[x] for x in clusters])


masses, errs, massgrid, scale_radii = ss.readMLMasses(workdir, subdir, clusters)
truemasses = {}
for cluster in clusters:
    truemasses[cluster] = nfwutils.massInsideR(scale_radii[cluster], 4., redshifts[cluster], 1.5)

x = np.hstack([len(masses[c])*[truemasses[c]] for c in clusters])
y = np.hstack([masses[c] for c in clusters])
yerr = np.hstack([errs[c] for c in clusters])

Example #22
def magSplit_script(category, data = None):
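    # Compare masses from the 'low' and 'high' halves of a catalog split on the given
    # category: fit the mean ratio and intrinsic scatter between the two halves and
    # overlay the 68%/95% bands on the mass-mass comparison plot.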

    figs = []

    if data is None:

        data = {}

    if 'items' not in data:

        data['items'] = readtxtfile('worklist')

#    try:

    loc1 = '%s/%s/low' % (trialsdir, category)
    loc2 = '%s/%s/high' % (trialsdir, category)

    if category not in data:

        
        masses = loadData(loc1, loc2,
                           data['items'])

        data[category] = {}
        data[category]['masses'] = masses

    curdata = data[category]


    if 'grid' not in curdata:

        grid, means, scatters = isg.intrinsicScatter(curdata['masses'][0], curdata['masses'][1], means = np.arange(0.6, 1.4, 0.002), scatters = np.arange(0.02, 0.2, 0.01))

        curdata['grid'] = grid
        curdata['means'] = means
        curdata['scatters'] = scatters

    else:

        grid = curdata['grid']
        means = curdata['means']
        scatters = curdata['scatters']

    print
    print
    print category
    print '-----'
    print 'var\tmode\t68% +\t-\t95% +\t-'
    print '-----'


    if 'meandist' not in curdata:

        mode, (r68, r95) = isg.getdist_1d_hist(means[0], means[1], levels = [0.68, 0.95])
        curdata['meandist'] = (mode, r68, r95)

        mode, (r68, r95) = isg.getdist_1d_hist(scatters[0], scatters[1], levels = [0.68, 0.95])
        curdata['scatterdist'] = (mode, r68, r95)



    for varname in 'mean scatter'.split():

        mode, r68, r95 = curdata['%sdist' % varname]

        print mode, r68, r95

        print '%s\t%2.4f\t+%2.4f\t-%2.4f\t+%2.4f\t-%2.4f' % (varname, mode, 
                                                             r68[0][1] - mode, mode - r68[0][0],
                                                             r95[0][1] - mode, mode - r95[0][0])

        x, prob = curdata['%ss' % varname]
        fig = isgp.plotdist_1d_hist(x, prob, mode, [r68[0], r95[0]])
        ax = fig.axes[0]
        ax.set_title(varname)

        figs.append(fig)
        fig.show()

    xlabel, ylabel = label_conv[category]
    fig, xmasses, ymasses = cm.publicationPlotMassMass(loc1, loc2, xlabel, ylabel)
    ax = fig.axes[0]

    mode, r68, r95 = curdata['meandist']

#    ax.axhline(mode, c='r', ls='--', linewidth=1.5)

    ax.fill_between([0.16, 0.72], r95[0][0], r95[0][1], facecolor=(1, 0.642, 0.610), zorder=-1)
    ax.fill_between([0.16, 0.72], r68[0][0], r68[0][1], facecolor='#CC0000', zorder=-1)
    ax.set_xlim(0.16, 0.72)
    ax.set_ylim(0.1, 10)

    fig.show()
    fig.savefig(filename_conv[category])
    figs.append(fig)
    

#    finally:
    
    return figs, data
Example #23
def PointEstPzScript(data=None):
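    # Compare the fractional mass bias of photo-z point-estimate masses with the P(z)
    # method in the contaminated BVRIZ simulations, as a function of cluster redshift.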

    if data is None:

        worklist = readtxtfile('simclusterlist')
        clusters = [x[0] for x in worklist]
        redshifts = cm.readClusterRedshifts()
        properredshifts = np.array([redshifts[x] for x in clusters])

        subdirs = ['contam0p10/BVRIZ']

        MLfracbias = ss.processFracBiasData(
            '/u/ki/dapple/nfs12/cosmos/simulations/clusters_2012-05-17-highdensity',
            subdirs, clusters, redshifts)[0]

        Bpointmass, Bpointgrid, scale_radii = ss.readPointMasses(
            '/u/ki/dapple/nfs12/cosmos/simulations/clusters_2012-05-17',
            'contam0p10/newman/BVRIZ', clusters)

        truemasses = [
            nfwutils.massInsideR(scale_radii[x], 4., redshifts[x], 1.5)
            for x in clusters
        ]

        Bpointfracbias = ss.calcFracBias(Bpointgrid, truemasses)

        data = [MLfracbias, Bpointfracbias, properredshifts]

    else:

        MLfracbias = data[0]
        Bpointfracbias = data[1]
        properredshifts = data[2]

    fig = pylab.figure()

    try:

        ax = fig.add_axes([0.15, 0.12, 0.96 - 0.15, 0.95 - 0.12])

        ax.axhline(0.0, c='k', linewidth=1.25)

        Apointmean, Apointerr = ss.bootstrapMean(MLfracbias)
        Bpointmean, Bpointerr = ss.bootstrapMean(Bpointfracbias)

        ax.errorbar(properredshifts - 0.0025,
                    Bpointmean,
                    Bpointerr,
                    fmt='cs',
                    label=r'Point Estimators',
                    color='#BFBFD4')
        ax.errorbar(properredshifts + 0.0025,
                    Apointmean,
                    Apointerr,
                    fmt='ro',
                    label=r'P(z) Method')

        ax.set_xlim(0.16, 0.72)
        ax.set_ylim(-0.08, 0.19)

        ax.set_xlabel('Cluster Redshift')
        ax.set_ylabel(r'Fractional Mass Bias within 1.5 Mpc')

        #        ax.text(0.2, 0.12, r'$BVr^{+}i^{+}z^{+}$ Photo-$z$ Point Est', fontsize=16)

        ax.legend(loc='upper left', numpoints=1, ncol=1)

        fig.savefig('publication/clustersims_pointest_pz_compare.eps')

    finally:

        return fig, data
Example #24
def stackClusters(data = None, doPlot = True, cosmology = nfwutils.std_cosmology):
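    # Variant of the stackClusters routine above using fixed redshift bins and the
    # per-bin medians from sr.calcBinDistro (with sig1/sig2 ranges); plotting is
    # optional via doPlot.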

    workdir = '/u/ki/dapple/ki06/catalog_backup_2012-02-08'
    outdir = '/u/ki/dapple/subaru/doug/publication/baseline_2012-02-08'

    if data is None:
        data = {}

    if 'items' not in data:
        data['items'] =  readtxtfile('worklist')
    items = data['items']

    if 'zbins' not in data:
        data['zbins'] = np.unique(np.hstack([np.linspace(0., 1., 3), np.linspace(1., 5., 10), np.linspace(5., 10., 5)]))
    zbins = data['zbins']
    bincenters = (zbins[1:] + zbins[:-1])/2.

    if 'clusters' not in data:
        clusters = {}
        for cluster, filter, image in items:
            key = (cluster, filter, image)
            clusters[key] = {}
            controller = driver.makeController()
            options, args = controller.modelbuilder.createOptions()
            options, args = controller.filehandler.createOptions(options = options, args = args, 
                             workdir = workdir, 
                             incatalog = '%s/%s.%s.%s.lensingbase.cat' % (workdir, cluster, filter, image),
                             cluster = cluster, filter = filter, image = image,
                             shapecut = True, 
                             redseqcat = '%s/%s.%s.%s.redsequence.cat' % (workdir, cluster, filter, image))

            controller.load(options, args)

            stats = cPickle.load(open('%s/%s.%s.%s.out.mass15mpc.mass.summary.pkl' % (outdir, cluster, filter, image)))
            mass = stats['quantiles'][50]

            rs = nfwutils.RsMassInsideR(mass, 4.0, controller.zcluster, 1.5)
            
            scaledZ, estimators = sr.scaleShear(controller, rs, 4.0, cosmology = cosmology)

            bins, weights, aveshear = sr.calcZBinWeights(scaledZ, controller.pdz, estimators, zbins)

            clusters[key]['weights'] = weights
            clusters[key]['shears'] = aveshear

        data['clusters'] = clusters

    else:
        clusters = data['clusters']

    
    if 'median' not in data:
        allweights = np.vstack([clusters[tuple(item)]['weights'] for item in items])
        allshears = np.vstack([clusters[tuple(item)]['shears'] for item in items])

        median, sig1, sig2 = sr.calcBinDistro(zbins, allweights, allshears)

        data['median'] = median
        data['sig1'] = sig1
        data['sig2'] = sig2

    else:
        
        median = data['median']
        sig1 = data['sig1']
        sig2 = data['sig2']

    
    if doPlot:

        fig = pylab.figure()
        ax = fig.add_axes([0.12, 0.12, 0.95 - 0.12, 0.95 - 0.12])

        ax.errorbar(bincenters, median, sig2, fmt='bo')
        ax.errorbar(bincenters, median, sig1, fmt='ro')

        xplot = np.arange(0., np.max(zbins), 0.01)
        ax.plot(xplot, sr.shearScaling(xplot), 'k-', linewidth=1.5)

        ax.set_xlabel('Scaled Redshift')
        ax.set_ylabel('Lensing Power')
        ax.set_title('All Cluster Stack -- Restricted to Fit Data')
        fig.savefig('notes/shearratio/stack_restricted_manypoints.pdf')

        return fig, data

    return data
Example #25
def fitAltOffsetScript(data=None):
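    # For each contamination level, fit the mean offset and intrinsic scatter between
    # the recovered simulation masses and the true NFW masses inside 1.5 Mpc, then
    # plot the marginal distributions of both parameters.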

    if data is None:
        data = {}

    worklist = readtxtfile('worklist')
    clusters = [x[0] for x in worklist]

    workdir = '/u/ki/dapple/nfs12/cosmos/simulations/clusters_2012-05-17-highdensity/'
    subdirs = ['%sBVRIZ' % x for x in ['', 'contam0p10/', 'contam0p20/']]
    concentration = 4.
    mradius = 1.5
    redshifts = cm.readClusterRedshifts()

    figs = []

    for subdir in subdirs:

        if subdir not in data:

            data[subdir] = {}

        curdata = data[subdir]

        if 'masses' not in curdata:

            curdata['masses'], errs, massgrid, curdata[
                'scale_radii'] = ss.readMLMasses(workdir, subdir, clusters)

        masses = curdata['masses']
        scale_radii = curdata['scale_radii']

        if 'grid' not in curdata:

            refmasses = {}

            for cluster in clusters:

                refmasses[cluster] = nfwutils.massInsideR(
                    scale_radii[cluster], concentration, redshifts[cluster],
                    mradius) * np.ones_like(masses[cluster])

            curdata['grid'], curdata['means'], curdata[
                'scatters'] = isg.intrinsicScatter(
                    refmasses,
                    masses,
                    means=1. + np.arange(-0.08, 0.08, 0.0001),
                    scatters=np.arange(0.005, 0.05, 0.0025))

            means = curdata['means']
            scatters = curdata['scatters']

            mode, (r68, r95) = isg.getdist_1d_hist(means[0],
                                                   means[1],
                                                   levels=[0.68, 0.95])
            curdata['meandist'] = (mode, r68, r95)

            mode, (r68, r95) = isg.getdist_1d_hist(scatters[0],
                                                   scatters[1],
                                                   levels=[0.68, 0.95])
            curdata['scatterdist'] = (mode, r68, r95)

        for varname in 'mean scatter'.split():

            mode, r68, r95 = curdata['%sdist' % varname]

            print mode, r68, r95

            print '%s\t%2.4f\t+%2.4f\t-%2.4f\t+%2.4f\t-%2.4f' % (
                varname, mode, r68[0][1] - mode, mode - r68[0][0],
                r95[0][1] - mode, mode - r95[0][0])

            x, prob = curdata['%ss' % varname]
            fig = isgp.plotdist_1d_hist(x, prob, mode, [r68[0], r95[0]])
            ax = fig.axes[0]
            ax.set_title('%s %s' % (subdir, varname))

            figs.append(fig)
            fig.show()

    return figs, data
Example #26
#!/usr/bin/env python
####################

import sys
import numpy as np
import compare_masses as cm, intrinsicscatter2 as isc2
from dappleutils import readtxtfile
import pymc
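# Calibrate ML (P(z)) masses against the color-cut bootstrap masses: restrict both
# sets to their common good bootstrap realizations, then build the IntrinsicScatter2
# model and its MCMC sampler with the requested output file.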

outfile = sys.argv[1]
nsamples = int(sys.argv[2])

items = readtxtfile('worklist')
clusters = [x[0] for x in items]

mlbootstraps, mlmasks = cm.readMLBootstraps('/u/ki/dapple/ki06/bootstrap_2011-12-14/', items)
ccbootstraps, ccbootmask = cm.readCCSummary('/u/ki/dapple/ki06/bootstrap_2011-12-14/', clusters, 100)

reducedML = {}
reducedCC = {}

for cluster in clusters:
    totalmask = np.logical_and(mlmasks[cluster] == 1, ccbootmask[cluster] == 1)
    reducedML[cluster] = mlbootstraps[cluster][totalmask]
    reducedCC[cluster] = ccbootstraps[cluster][totalmask]

calibmodel = isc2.IntrinsicScatter2(reducedML, reducedCC)
calibMCMC = calibmodel.buildMCMC(outfile)

#calibMCMC.use_step_method(pymc.Metropolis, calibMCMC.m_angle, proposal_sd = 0.1)
#calibMCMC.use_step_method(pymc.Metropolis, calibMCMC.log10_intrinsic_scatter, proposal_sd = 0.8)