Example #1
def get_PDZerrDistribution(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs.
---
hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
bpzfile  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
columns  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
    
    """
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))
    good = N.greater(abs(mo), 17.) * N.less(abs(mo), 25.)
    # good = N.greater(abs(mo),22.)*N.less(abs(mo),23.)
    ids, zb, zs, mo = U.multicompress(good, (ids, zb, zs, mo))
    ng = len(ids)

    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    pdz = pdzo[good, :, :]
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    delta_z_pdzs = N.zeros(nz - 1)

    # Computing the z error distr. function
    # based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        delta_z_pdzs += U.match_resol(zz - zb[ii], pdz_mot, basez2b)

    plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    plt.subplot(211)
    plt.plot(basez2b, a1 / float(sum(a1)), 'b-', lw=12, alpha=0.6)
    plt.plot(basez2b,
             delta_z_pdzs / float(sum(delta_z_pdzs)),
             'r-',
             lw=5,
             alpha=0.9)
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.ylabel('P(z)', size=20, labelpad=+1)
    plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
    plt.subplot(212)
    resi = 2
    plt.plot(
        basez2b[::resi],
        abs((a1[::resi] / float(sum(a1))) -
            (delta_z_pdzs[::resi] / float(sum(delta_z_pdzs)))), 'k-')
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.xlabel(r'$\delta_{z}$', size=30)
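
# A minimal usage sketch for the function above (hedged): it assumes the module
# is importable as 'alhambrahdf5', as in Example #5, and reuses the paths from
# the docstring. The call draws the two-panel comparison of peak-based vs
# PDF-based dz/(1+z) distributions.
import alhambrahdf5 as AH
hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
columns = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
AH.get_PDZerrDistribution(hdf5file, bpzfile, columns)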
Example #2
def function(z, m, nt):
    """Prior based on the SDSS spectroscopic catalog
    This function defines a prior based only on the 
    magnitude of the objects. It assumes that the type 
    fraction does not depend on redshift
    It assumes a shape p(z)=z**2*exp(-(z/zm)**1.5)
    Returns an array pi[z[:],:6]
    The i-band should be  used as M_0
    """

    # Cache z**2 and z**1.5 between calls (names must match the arrays below).
    global zt_2
    global zt_1p5
    xm = numpy.arange(12., 18.0)
    ft = numpy.array((0.55, 0.21, 0.21, .01, .01, .01))
    zm0 = numpy.array([0.021, 0.034, 0.056, 0.0845, 0.1155, 0.127]) * (old_div(
        2., 3.))

    if len(ft) != nt:
        print("Wrong number of templates!")
        sys.exit()

    nz = len(z)
    m = numpy.array([m])  #match_resol works with arrays
    m = numpy.clip(m, xm[0], xm[-1])
    zm = match_resol(xm, zm0, m)
    #    zm=zm[0]
    try:
        zt_2.shape
    except NameError:
        t2 = [2.] * nt
        zt_2 = numpy.power.outer(z, t2)
    try:
        zt_1p5.shape
    except NameError:
        t1p5 = [1.5] * nt
        zt_1p5 = numpy.power.outer(z, t1p5)

    zm_3 = numpy.power.outer(zm, 3)
    zm_1p5 = numpy.power.outer(zm, 1.5)
    p_i = 3. / 2. / zm_3 * zt_2[:, :] * numpy.exp(
        -numpy.clip(old_div(zt_1p5[:, :], zm_1p5), 0., 700.))
    norm = numpy.add.reduce(p_i[:nz, :], 0)
    #Get rid of very low probability levels
    p_i[:nz, :] = numpy.where(
        numpy.less(old_div(p_i[:nz, :], norm[:]), old_div(1e-5, float(nz))),
        0., old_div(p_i[:nz, :], norm[:]))
    norm = numpy.add.reduce(p_i[:nz, :], 0)
    return p_i[:nz, :] / norm[:] * ft[:]
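
# The prior above has the analytic form p(z|m) ~ z**2 * exp(-(z/zm)**1.5),
# with zm interpolated from the magnitude via match_resol(xm, zm0, m).
# A self-contained sketch of that shape (zm fixed here only for illustration):
import numpy
z = numpy.linspace(0.001, 0.5, 200)     # redshift grid
zm = 0.05                               # illustrative value of zm
p = z**2 * numpy.exp(-(z / zm)**1.5)    # p(z) = z**2 * exp(-(z/zm)**1.5)
p /= p.sum()                            # normalize over the grid
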
root = '/Users/albertomolino/codigos/bpz-1.99.2/FILTER/'
names = pd.read_table(root + 'SPLUS_September2018/SPLUS_201809.list')
filtros_old = U.get_str(root + 'SPLUS_July2017/SPLUS.list', 0)
filtros_new = U.get_str(root + 'SPLUS_September2018/SPLUS_201809.list', 0)

base = N.arange(3200, 10000, 10)
values = N.zeros(12)
eff_wav = N.zeros(12)
for ii in range(12):
    eff_wav[ii] = B.effective_wavelength(filtros_new[ii])

for ii in range(12):
    x_o, y_o = U.get_data(root + filtros_old[ii], (0, 1))
    x_n, y_n = U.get_data(root + filtros_new[ii], (0, 1))
    y_o_r = U.match_resol(x_o, y_o, base)
    y_n_r = U.match_resol(x_n, y_n, base)
    values[ii] = N.sum(y_o_r - y_n_r) / N.sum(y_o_r)

plt.figure(20)
plt.subplot(211)
for ii in range(12):
    x, y = U.get_data(root + filtros_old[ii], (0, 1))
    plt.fill_between(x * 1., y, 0, alpha=0.4, color='grey', lw=1)
    x, y = U.get_data(root + filtros_new[ii], (0, 1))
    plt.fill_between(x * 1., y, 0, alpha=0.4, color='grey', lw=1)
plt.xlim(3000, 10000)
plt.ylim(0., 0.8)
plt.ylabel('Throughput', size=28, labelpad=8)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
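
# The snippet breaks off after the upper panel. A plausible lower panel,
# built from the eff_wav and values arrays computed above (hedged sketch,
# not the original code):
plt.subplot(212)
plt.plot(eff_wav, values * 100., 'ko', ms=8)  # fractional throughput change, in percent
plt.axhline(0., color='grey', ls='--')
plt.xlim(3000, 10000)
plt.xlabel(r'Effective wavelength [$\AA$]', size=28)
plt.ylabel(r'$\Delta$Throughput [%]', size=28, labelpad=8)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)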
Example #4
    else:
        plt.title('R=%s' % (m[ss + 3]), size=20)
    if ss in [0, 4]:
        plt.ylabel('Spectral-type', size=20)
    #if ss in [4,5,6,7]:
    plt.xlabel('$z$', size=25, labelpad=-2)
plt.savefig('/Users/albertomolino/Desktop/hdf5plot2.png', dpi=90)

basez = N.arange(0.005, 0.45, 0.03)
plt.figure(23, figsize=(13, 6), dpi=70, facecolor='w', edgecolor='k')
plt.clf()
for ss in range(4):
    plt.subplot(1, 4, ss + 1)
    pepe0 = final_mat[ss, 50:-150, :]
    pepa0 = U.sum(pepe0, axis=1)
    new_pdz0 = U.match_resol(zz[50:-150], pepa0, basez)
    plt.plot(basez, (new_pdz0 / new_pdz0.sum() * 1.), '-ro')
    if ss < 1:
        plt.title(r'R$\leq$%s' % (m[ss + 3]), size=20)
    else:
        plt.title('R=%s' % (m[ss + 3]), size=20)
    if ss in [0, 4]:
        plt.ylabel('Spectral-type', size=20)
    plt.xlabel('$z$', size=25, labelpad=-2)
    plt.xlim(0.005, 0.35)
plt.savefig('/Users/albertomolino/Desktop/hdf5plot3.png', dpi=90)
"""
ii=0


Example #5
def get_PDZerrDistribution_byMagnitudes(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs.
---
import alhambrahdf5 as AH
#hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.hdf5'
#bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.bpz'
#columns = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.columns'
hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
bpzfile  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
columns  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
basez2b,delta_z_peaks,delta_z_pdzs = AH.get_PDZerrDistribution_byMagnitudes(hdf5file,bpzfile,columns) 
    
    """
    basem = N.arange(18, 26, 2)
    # basem = N.arange(18,25,2)
    nm = len(basem)
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))
    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)

    # Defining the final outputs.
    delta_z_pdzs = N.zeros((nm - 1, nz - 1), float)
    delta_z_peaks = N.zeros((nm - 1, nz - 1), float)

    for ii in range(nm - 1):
        good = N.greater_equal(mo, basem[ii]) * N.less_equal(mo, basem[ii + 1])
        idr, zbr, zsr, mor = U.multicompress(good, (ids, zb, zs, mo))
        ng = len(idr)
        pdz = pdzo[good, :, :]

        # Computing the z error distr. function
        # based on peak values.
        temporal_delta_z_peaks = (zbr - zsr) / (1. + zsr)
        a1, a2 = N.histogram(temporal_delta_z_peaks, basez2)
        delta_z_peaks[ii, :] = a1[:]

        for jj in range(ng):
            pdz_mot = U.sum(pdz[jj, :, :], axis=1)
            delta_z_pdzs[ii, :] += U.match_resol(zz - zbr[jj], pdz_mot,
                                                 basez2b)

    # plt.figure(12, figsize = (8.5,10.),dpi=80, facecolor='w', edgecolor='k')
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(basez2b,a1/float(sum(a1)),'b-',lw=12,alpha=0.6)
    # plt.plot(basez2b,delta_z_pdzs/float(sum(delta_z_pdzs)),'r-',lw=5,alpha=0.9)
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.ylabel('P(z)',size=20,labelpad=+1)
    # plt.legend(['peaks','pdfs'],loc='upper left',fontsize=20)
    # plt.subplot(212)
    # resi = 2
    # plt.plot(basez2b[::resi],abs((a1[::resi]/float(sum(a1)))-(delta_z_pdzs[::resi]/float(sum(delta_z_pdzs)))),'k-')
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.xlabel('$\delta_{z}$',size=30)

    return basez2b, delta_z_peaks, delta_z_pdzs

if odds_cut:
    z1, p1 = U.get_data(root + 'master_R17_PDF.specz.odds09.txt', (0, 3))
    z2, p2 = U.get_data(root + 'master_R19_PDF.specz.odds09.txt', (0, 3))
    z3, p3 = U.get_data(root + 'master_R21_PDF.specz.odds09.txt', (0, 3))
else:
    z1, p1 = U.get_data(root + 'master_R17_PDF.specz.txt', (0, 3))
    z2, p2 = U.get_data(root + 'master_R19_PDF.specz.txt', (0, 3))
    z3, p3 = U.get_data(root + 'master_R21_PDF.specz.txt', (0, 3))

if odds_cut:
    dz = 0.003
else:
    dz = 0.007

basez_1 = N.arange(0.01, max(zbs_1) + dz, dz)
basez2_1 = basez_1[:-1] + ((basez_1[1] - basez_1[0]) / 2.)
v1, v2, v3 = plt.hist(zbs_1, basez_1, color='blue', alpha=0.3, density=False)
p1r = U.match_resol(z1, p1, basez2_1)

basez_2 = N.arange(0.01, max(zbs_2) + dz, dz)
basez2_2 = basez_2[:-1] + ((basez_2[1] - basez_2[0]) / 2.)
w1, w2, w3 = plt.hist(zbs_2, basez_2, color='blue', alpha=0.3, density=False)
p2r = U.match_resol(z2, p2, basez2_2)

basez_3 = N.arange(0.01, max(zbs_3) + dz, dz)
basez2_3 = basez_3[:-1] + ((basez_3[1] - basez_3[0]) / 2.)
q1, q2, q3 = plt.hist(zbs_3, basez_3, color='blue', alpha=0.3, density=False)
p3r = U.match_resol(z3, p3, basez2_3)

plt.figure(21)
plt.clf()
plt.subplot(131)
plt.loglog(basez2_1, v1, 'r-', lw=5, alpha=0.8)
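
# The figure is cut off after the first panel; the remaining two panels
# would plausibly mirror it with the R19 and R21 histograms (hedged
# completion, not the original code):
plt.subplot(132)
plt.loglog(basez2_2, w1, 'r-', lw=5, alpha=0.8)
plt.subplot(133)
plt.loglog(basez2_3, q1, 'r-', lw=5, alpha=0.8)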
Example #7
        plt.plot(rf_wavel[clean_sample], delta_f[clean_sample], '+', alpha=0.2)
        plt.plot(base_wavel, average_corr, '-ro', lw=3)
        plt.grid()
        plt.ylim(0.5, 1.5)
        plt.xlim(min_wave_corr * 0.9, max_wave_corr * 1.1)
        plt.ylabel('$F_{th}/F_{ob}$', size=20)

    outfilename = final_sed_root_data + sed[
        ss] + 'S82SPLUS_crf_res%iAA.dat' % (new_delta_lbda)
    U.put_data(outfilename, (base_wavel, average_corr),
               '# base_wavel average_corr')

    ## Here it applies the corrections to the original templates
    sed_wavel, sed_flux_orig = U.get_data(root_to_seds + sed[ss], (0, 1))

    corr_ori_wavel = U.match_resol(base_wavel, average_corr, sed_wavel)
    if new_dim > min_ng:
        sed_flux_new = sed_flux_orig / (1. * corr_ori_wavel)
    else:
        sed_flux_new = sed_flux_orig / 1.

    if plots:
        plt.subplot(212)
        pepe = sct.lookcloser(sed_wavel, normal_wavel)
        plt.semilogy(sed_wavel,
                     sed_flux_orig / sed_flux_orig[pepe - 1],
                     '-',
                     linewidth=8.,
                     alpha=0.5,
                     color='grey')
Example #8
def reobs(sed,
          m=0.,
          z_0=0.,
          oldfilter='I_LRIS',
          z_new=0.,
          newfilter='V_LRIS',
          cosmology=(0.3, 0.7, .7),
          madau='yes'):
    """Arguments: sed,m,z_0,oldfilter,z_new,newfilter,cosmology
    Takes a galaxy with m at redshift z_0 in oldfilter,
    SED=sed and produces its new magnitude in newfilter at z_new.
    Takes into account cosmological dimming and intergalactic Madau absorption
    The tuple cosmology=(omega,lambda,hubble_constant)
    """
    if sed[-4:] == '.sed': sed = sed[:-4]
    # Crude scalar-vs-array check: compares the first three characters of the
    # class names, so single_z is True when z_new and z_0 are the same kind
    # of object.
    #single_z=type(z_new)==type(z_0)
    single_z = z_new.__class__.__name__[0:3] == z_0.__class__.__name__[0:3]

    if single_z:
        if z_0 == z_new and oldfilter == newfilter: return m
        z_new = numpy.array([z_new])

    # Calculate fnew
    model = '.'.join([sed, newfilter, 'AB'])
    model_path = os.path.join(ab_dir, model)

    #Check whether there are already AB files
    if madau == 'yes':
        if model[:-3] in ab_db:
            zo, f_mod_0 = useful.get_data(model_path, (0, 1))
            fnew = z_new * 0.
            for i in range(len(z_new)):
                fnew[i] = useful.match_resol(zo, f_mod_0, z_new[i])
        else:
            fnew = f_z_sed_AB(sed, newfilter, z_new, 'nu')
    else:
        fnew = f_z_sed(sed, newfilter, z_new, units='nu', madau=madau)

    fnew = numpy.where(
        numpy.equal(fnew, 0.), 99.,
        fnew)  # if the new flux is 0, returns 99. (code for non-detection)

    #Calculate f_old
    model = '.'.join([sed, oldfilter, 'AB'])
    model_path = os.path.join(ab_dir, model)

    #Check whether there are already AB files
    if madau == 'yes':
        if model[:-3] in ab_db:
            zo, f_mod_0 = useful.get_data(model_path, (0, 1))
            f_old = useful.match_resol(zo, f_mod_0, z_0)
        else:
            f_old = f_z_sed_AB(sed, oldfilter, numpy.array([z_0]), units='nu')
    else:
        f_old = f_z_sed(sed, oldfilter, numpy.array([z_0]), units='nu')

    k = 2.5 * numpy.log10((old_div(
        (1. + z_new), fnew)) * (old_div(f_old, (1. + z_0))))

    if single_z and z_0 == z_new[0]:
        m_obs = m + k
        return m_obs[0]

    # Distance modulus
    dist = dist_mod(z_new, cosmology) - dist_mod(z_0, cosmology)
    m_obs = m + dist + k
    if single_z: return m_obs[0]
    else: return m_obs
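
# A minimal usage sketch for reobs (hedged): the SED and filter names below
# are placeholders in the BPZ naming style, not taken from the source. The
# k-correction applied above is k = 2.5*log10((1+z_new)/f_new * f_old/(1+z_0)).
m_new = reobs('El_cww', m=22.0, z_0=0.1, oldfilter='I_LRIS',
              z_new=0.3, newfilter='V_LRIS',
              cosmology=(0.3, 0.7, 0.7), madau='yes')
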
def get_masterPDZ_pro(hdf5file, mmin, mmax, dm):
    """
    TAKEN DIRECTLY FROM CLASH_TOOLS.py

    This routine derives the master PDZ for
    a sample of magnitudes based on an empirical
    BPZ-HDF5 catalogue.
    ---
    This new version allows the user to change
    the magnitude range from outside.
    ===
import clash_tools as CT
hdf5file = root+'alhambra.spz.hdf5'
mpdz,z,sigz,meanz = CT.get_masterPDZ_pro(hdf5file,mmin,mmax,dm)

    """
    # Reading data
    p1 = h5py.File(hdf5file, mode='r')
    pdz1 = p1.get('FullProbability')
    zz1 = p1.get('redshift')[:]
    tt1 = p1.get('type')[:]
    mo1 = p1.get('m_0')[:]

    # Defining resolution and other variables.
    basem = N.arange(mmin, mmax + dm, dm)
    nm = len(basem)
    res = 2
    zz1r = zz1[::res]
    dz2 = (zz1r.max() - zz1r.min())
    basez = N.linspace(-dz2, dz2, len(zz1r) * 2)
    basez2 = basez + ((basez[1] - basez[0]) / 2.)
    masterpdz = N.zeros((nm, len(basez)), float)
    sigma_pdz = N.zeros(nm)
    meanz = N.zeros(nm)

    # Global P(z)
    weirdpeaks = []
    print('Number of magnitude-bins:', nm)
    for jj in range(nm):
        # Selecting galaxies within that magnitude bin.
        if jj == 0:
            good = N.less_equal(mo1, basem[jj])
            pdz1r = pdz1[good, :, :]
        elif jj == nm - 1:
            good = N.greater_equal(mo1, basem[jj - 1])
            good *= N.less_equal(mo1, basem[jj])
            pdz1r = pdz1[good, :, :]
        else:
            good = N.greater_equal(mo1, basem[jj - 1])
            good *= N.less_equal(mo1, basem[jj])
            pdz1r = pdz1[good, :, :]

        ng1 = N.shape(pdz1r)[0]
        peaks = []
        print('%i galaxies in magnitude-bin %i ' % (ng1, jj + 1))
        mo1r = mo1[good]
        for ii in range(ng1):
            # print 'galaxy number %i out of %i '%(ii+1,ng1)
            pdz_ind = N.sum(pdz1r[ii, :, :], axis=1)
            pdz_ind_r = pdz_ind[::res]
            peak = pdz_ind_r.max()
            pos = N.where(pdz_ind_r == peak)[0][0]
            zpeak = zz1r[pos]
            # new_zmin = len(zz1r)-zpeak
            new_zmin = len(zz1r) - pos - 1  # len(zz1r[0:pos])
            min_val = min(pdz_ind_r)
            max_val = max(pdz_ind_r)
            if abs(min_val - max_val) < 1.0e-8: pdz_ind_r[:] = zz1r * 0
            if zpeak < 0.0005: pdz_ind_r[:] = zz1r * 0
            # try: ... except: weirdpeaks.append(zpeak)
            if zpeak > 0.0005:
                masterpdz[jj, new_zmin:new_zmin + len(zz1r)] += pdz_ind_r[:]
                peaks.append(zpeak)

            # plt.figure(200)
            # plt.clf()
            # plt.plot(zz1r,pdz_ind_r,'k-')
            # plt.xlim(0.,0.2)
            # plt.grid()
            # print 'AB,dz/1+z,dz: %.2f,%.3f,%.2f:'%(mo1r[ii],(zpeak-0.044)/1.044,zpeak-0.044)
            # pausa = raw_input('paused')

        masterpdz[jj, :] /= float(masterpdz[jj, :].sum())
        cumasterpdz = U.add.accumulate(masterpdz[jj, :])
        cumasterpdz /= cumasterpdz.max()
        # 17th and 83rd percentiles of the cumulative P(z): a ~1-sigma width.
        zmin_err_e = U.match_resol(cumasterpdz, basez2, 0.17)
        zmax_err_e = U.match_resol(cumasterpdz, basez2, 0.83)
        sigma_pdz[jj] = (zmax_err_e - zmin_err_e)
        peak_position = N.where(
            masterpdz[jj, :] == masterpdz[jj, :].max())[0][0]
        # print 'peak_position',peak_position
        # print 'zpeak=',zz1r[peak_position]
        meanz[jj] = U.mean_robust(N.array(peaks))
        # U.std_mad(N.array(peaks))

    return masterpdz, basez2, sigma_pdz, meanz
def get_PDZerrDistribution_byTemplates(hdf5file, bpzfile, m_max):
    """
    It returns the error distribution based on PDZs.
---
import splus_s82_hdf5_tools as to
root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
root += 'S82/Dec2017/splus_cats_NGSL/'
hdf5list = root+'hdf5.list'
bpzlist = root+'bpz/master.STRIPE82_Photometry.m21.bpz.list'
hdf5_files = U.get_str(hdf5list,0)
n_hdf5 = len(hdf5_files)
bpz_files  = U.get_str(bpzlist,0)
n_bpz = len(bpz_files)
for ii in range(n_bpz):
    name = os.path.basename(hdf5_files[ii])
    print(name)
    try: z,dp,df = to.get_PDZerrDistribution_byTemplates(hdf5_files[ii],bpz_files[ii],19)
    except: print('Impossible to run on', name)

    """

    plots = 1
    # starting plots if necessary
    if plots:
        plt.figure(12,
                   figsize=(8.5, 10.),
                   dpi=80,
                   facecolor='w',
                   edgecolor='k')

    try:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 11, 12, 4, 5))
    except:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 9, 10, 4, 5))
    good = N.less_equal(mo, m_max)
    ids, zb, zs, mo, tb, odd = U.multicompress(good,
                                               (ids, zb, zs, mo, tb, odd))
    ng = len(ids)

    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    #pdzo = p.get('FullProbability')
    pdz = p.get('Likelihood')
    pdz = pdz[good, :, :]
    zz = p.get('redshift')[:]
    dz = (zz[2] - zz[1]) * 100.
    basez2 = N.arange(-0.2, 0.2, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    res = 1

    # Computing the z error distr. function
    # based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    delta_z_pdzs = N.zeros(nz - 1)
    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        pdz_mot_peak = pdz_mot / float(max(pdz_mot))
        # To get rid of long tails in PDFs with low probabilities.
        pdz_mot_peak = N.where(pdz_mot_peak < 1.0e-4, 0., pdz_mot_peak)
        pdz_mot_norm = pdz_mot_peak / float(sum(pdz_mot_peak))
        pdz_mot_norm = N.where(pdz_mot_norm < 0., 0., pdz_mot_norm)
        #pdz_mot_norm  = pdz_mot/float(sum(pdz_mot))
        pdz_mot_norm_resample = U.match_resol(zz - zs[ii], pdz_mot_norm,
                                              basez2b)
        pdz_mot_norm_resample = N.where(pdz_mot_norm_resample < 0., 0.,
                                        pdz_mot_norm_resample)
        delta_z_pdzs += pdz_mot_norm_resample[:]
        """
        if plots:
           plt.clf()
           plt.subplot(121)
           peak_zb_pos = N.argmax(pdz_mot_norm[::res])
           print zz[peak_zb_pos]
           plt.plot(zz[::res]-zs[ii],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
           #plt.plot(zz[::res]-zz[peak_zb_pos],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
           plt.grid()
           plt.xlim(-0.2,0.2)
           #plt.ylim(0.001,0.1)
           plt.xlabel('$\delta_{z}$',size=30)
           plt.ylabel('P(z)',size=20,labelpad=+1)
           plt.legend(['R=%.2f''\n''T=%.1f''\n''O=%.1f'%(mo[ii],tb[ii],odd[ii])],loc='upper right')
           plt.title('zb = %.2f, zs = %.2f, dz/1+z = %.2f'%(zb[ii],zs[ii],delta_z_peaks[ii]),size=20)
           plt.subplot(122)
           plt.plot(basez2b,delta_z_pdzs,'k-',lw=5)
           plt.grid()
           plt.xlim(-0.2,0.2)
           #plt.ylim(0.001,0.1)
           plt.xlabel('$\delta_{z}$',size=30)
           plt.ylabel('P(z)',size=20,labelpad=+1)
           pausa = raw_input('press a bottom to continue')
        """

    # New variables to handle the data easily:
    # both distributions normalized to unit sum.
    norm_dz_peaks = a1 / float(sum(a1))
    norm_dz_pdfs = delta_z_pdzs / float(sum(delta_z_pdzs))

    if plots:
        plt.figure(11,
                   figsize=(8.5, 10.),
                   dpi=80,
                   facecolor='w',
                   edgecolor='k')
        plt.clf()
        #plt.subplot(212)
        plt.plot(basez2b, norm_dz_peaks, 'b-', lw=8, alpha=0.6)
        plt.plot(basez2b, norm_dz_pdfs, 'r-', lw=5, alpha=0.9)
        plt.grid()
        plt.xlim(-0.2, 0.2)
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
        plt.xlabel(r'$\delta_{z}$', size=30)
        plot_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.png' % (m_max)
        plt.savefig(plot_filename, dpi=80)

    # Saving data into a file.
    output_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (basez2b, norm_dz_peaks, norm_dz_pdfs),
               'z dz_peak dz_PDFs')

    return basez2b, norm_dz_peaks, norm_dz_pdfs
Example #11
def function(z, m, nt):
    """HDFN prior for the main six types of Benitez 2000
    Returns an array pi[z[:],:6]
    The input magnitude is F814W AB
    """

    if nt != 6:
        print("Wrong number of template spectra!")
        sys.exit()

    # Cached power arrays, kept between calls (names must match the code below).
    global zt_at_a
    global zt_1p5
    global zt_2

    nz = len(z)
    momin_hdf = 20.

    if m <= 20.:
        xm = numpy.arange(12., 18.0)
        ft = numpy.array((0.55, 0.21, 0.21, .01, .01, .01))
        zm0 = numpy.array([0.021, 0.034, 0.056, 0.0845, 0.1155, 0.127]) * (
            old_div(2., 3.))

        if len(ft) != nt:
            print("Wrong number of templates!")
            sys.exit()

        nz = len(z)
        m = numpy.array([m])  # match_resol works with arrays
        m = numpy.clip(m, xm[0], xm[-1])
        zm = match_resol(xm, zm0, m)
        try:
            zt_2.shape
        except NameError:
            t2 = [2.] * nt
            zt_2 = numpy.power.outer(z, t2)
        try:
            zt_1p5.shape
        except NameError:
            t1p5 = [1.5] * nt
            zt_1p5 = numpy.power.outer(z, t1p5)

        zm_3 = numpy.power.outer(zm, 3)
        zm_1p5 = numpy.power.outer(zm, 1.5)
        p_i = 3. / 2. / zm_3 * zt_2[:, :] * numpy.exp(-numpy.clip(
            old_div(zt_1p5[:, :], zm_1p5), 0., 700.))
        norm = numpy.add.reduce(p_i[:nz, :], 0)
        #Get rid of very low probability levels
        p_i[:nz, :] = numpy.where(
            numpy.less(
                old_div(p_i[:nz, :], norm[:]), old_div(1e-5, float(nz))), 0.,
            old_div(p_i[:nz, :], norm[:]))
        norm = numpy.add.reduce(p_i[:nz, :], 0)
        return p_i[:nz, :] / norm[:] * ft[:]

    else:

        m = numpy.minimum(numpy.maximum(20., m), 32)
        a = numpy.array((2.465, 1.806, 1.806, 0.906, 0.906, 0.906))
        zo = numpy.array((0.431, 0.390, 0.390, 0.0626, 0.0626, 0.0626))
        km = numpy.array((0.0913, 0.0636, 0.0636, 0.123, 0.123, 0.123))
        fo_t = numpy.array((0.35, 0.25, 0.25))
        k_t = numpy.array((0.450, 0.147, 0.147))
        dm = m - momin_hdf
        zmt = numpy.clip(zo + km * dm, 0.01, 15.)
        zmt_at_a = zmt**(a)
        #We define z**a as global to keep it
        #between function calls. That way it is
        # estimated only once
        try:
            zt_at_a.shape
        except NameError:
            zt_at_a = numpy.power.outer(z, a)

        # Morphological fractions
        f_t = numpy.zeros((len(a), ), float)
        f_t[:3] = fo_t * numpy.exp(-k_t * dm)
        f_t[3:] = old_div((1. - numpy.add.reduce(f_t[:3])), 3.)
        # Formula:
        # zm = zo + km*(m - m_min)
        # p(z|T,m) = (z**a) * numpy.exp(-(z/zm)**a)
        p_i = zt_at_a[:nz, :6] * numpy.exp(-numpy.clip(
            old_div(zt_at_a[:nz, :6], zmt_at_a[:6]), 0., 700.))
        #This eliminates the very low level tails of the priors
        norm = numpy.add.reduce(p_i[:nz, :6], 0)
        p_i[:nz, :6] = numpy.where(
            numpy.less(
                old_div(p_i[:nz, :6], norm[:6]), old_div(1e-2, float(nz))),
            0., old_div(p_i[:nz, :6], norm[:6]))
        norm = numpy.add.reduce(p_i[:nz, :6], 0)
        p_i[:nz, :6] = p_i[:nz, :6] / norm[:6] * f_t[:6]
        return p_i
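
# For m > 20 the prior above follows p(z|T,m) ~ z**a * exp(-(z/zmt)**a), with
# zmt = zo + km*(m - 20) per template. A self-contained sketch for the first
# (E/S0) template, using the coefficients from the arrays above:
import numpy
z = numpy.linspace(0.01, 4.0, 400)
a, zo, km = 2.465, 0.431, 0.0913
m = 24.0
zmt = numpy.clip(zo + km * (m - 20.), 0.01, 15.)
p = z**a * numpy.exp(-numpy.clip((z / zmt)**a, 0., 700.))
p /= p.sum()   # normalize over the grid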
Example #12
complet_file_mr = root_to_complet + 'AEGIS.master.spz.%s.' % (aperture)
complet_file_mr += 'cali.completeness.mr.cat'
number_counts = root_to_counts + 'AEGIS.master.z.counts.mr.cat'

# Reading the number counts.
base_mr, mr = U.get_data(number_counts, (0, 1))
# Reading the completeness fraction.
base_mo, cm_o1, cm_o2, cm_o3, cm_o4 = U.get_data(complet_file_mr, (0, 1, 2, 3, 4))

# Intervals to be used.
mmin = 17.
mmax = 28.
dm = 0.5
new_base = np.arange(mmin, mmax + dm, dm)

new_numb_counts = U.match_resol(base_mr, mr, new_base)
new_complet_o1 = U.match_resol(base_mo, cm_o1, new_base)
new_complet_o1 = np.where(new_complet_o1 < 0., 0., new_complet_o1)
new_complet_o2 = U.match_resol(base_mo, cm_o2, new_base)
new_complet_o2 = np.where(new_complet_o2 < 0., 0., new_complet_o2)
new_complet_o3 = U.match_resol(base_mo, cm_o3, new_base)
new_complet_o3 = np.where(new_complet_o3 < 0., 0., new_complet_o3)
new_complet_o4 = U.match_resol(base_mo, cm_o4, new_base)
new_complet_o4 = np.where(new_complet_o4 < 0., 0., new_complet_o4)

# Final number counts.
final_num_counts_o1 = new_numb_counts * new_complet_o1
final_num_counts_o2 = new_numb_counts * new_complet_o2
final_num_counts_o3 = new_numb_counts * new_complet_o3
final_num_counts_o4 = new_numb_counts * new_complet_o4
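
# A minimal sketch of how the completeness-corrected counts might be
# inspected (hedged; the figure, labels, and plt = matplotlib.pyplot
# are assumptions, not part of the original script):
plt.figure(30)
plt.clf()
for counts, label in zip((final_num_counts_o1, final_num_counts_o2,
                          final_num_counts_o3, final_num_counts_o4),
                         ('odds bin 1', 'odds bin 2', 'odds bin 3', 'odds bin 4')):
    plt.semilogy(new_base, counts, '-', lw=3, alpha=0.8, label=label)
plt.xlabel('magnitude')
plt.ylabel('N(m)')
plt.legend(loc='upper left')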