Example #1
def select_galaxies_JPLUS_catalog(catalog):
    """

import sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import numpy as N
import jplus_calib_tools
from jplus_calib_tools import select_jplus_galaxies,num_tiles
catalog = '/Users/albertomolino/jplus_data_download/SV02_March07/SV02_March07.clean.cat'

    """

    tile_num, obj_id, ra, dec, fw = U.get_data(catalog, (0, 1, 2, 3, 4))
    u_m, u_em, g_m, g_em, r_m, r_em = U.get_data(catalog, (5, 6, 7, 8, 9, 10))
    i_m, i_em, z_m, z_em, f395_m, f395_em = U.get_data(
        catalog, (11, 12, 13, 14, 15, 16))
    f410_m, f410_em, f430_m, f430_em, f515_m, f515_em = U.get_data(
        catalog, (17, 18, 19, 20, 21, 22))
    f660_m, f660_em, f861_m, f861_em = U.get_data(catalog, (23, 24, 25, 26))

    header_cat = ('# 1.tile_id 2.object_id 3.ra 4.dec 5.fwhm 6.uJAVA 7.uJAVA_err 8.gSDSS '
                  '9.gSDSS_err 10.rSDSS 11.rSDSS_err 12.iSDSS 13.iSDSS_err 14.zSDSS 15.zSDSS_err '
                  '16.J0395 17.J0395_err 18.J0410 19.J0410_err 20.J0430 21.J0430_err '
                  '22.J0515 23.J0515_err 24.J0660 25.J0660_err 26.J0861 27.J0861_err')

    only_tiles = num_tiles(tile_num)  # tiles w/o duplications
    n_tiles = len(only_tiles)  # number of Tiles

    for ii in range(n_tiles):
        ref_tile = only_tiles[ii]
        g1 = N.less(abs(tile_num - ref_tile), 1)
        tile_cat = catalog[:-3] + '%i.gal.cat' % (ref_tile)
        tile_num_r, obj_id_r, ra_r, dec_r, fw_r = U.multicompress(
            g1, (tile_num, obj_id, ra, dec, fw))
        u_m_r, u_em_r, g_m_r, g_em_r, r_m_r, r_em_r = U.multicompress(
            g1, (u_m, u_em, g_m, g_em, r_m, r_em))
        i_m_r, i_em_r, z_m_r, z_em_r, f395_m_r, f395_em_r = U.multicompress(
            g1, (i_m, i_em, z_m, z_em, f395_m, f395_em))
        f410_m_r, f410_em_r, f430_m_r, f430_em_r, f515_m_r, f515_em_r = U.multicompress(
            g1, (f410_m, f410_em, f430_m, f430_em, f515_m, f515_em))
        f660_m_r, f660_em_r, f861_m_r, f861_em_r = U.multicompress(
            g1, (f660_m, f660_em, f861_m, f861_em))
        gs = select_jplus_galaxies(fw_r, r_m_r)
        # out_cat = maincat[:-3]+'gal.cat'
        U.put_data(
            tile_cat,
            (tile_num_r[gs], obj_id_r[gs], ra_r[gs], dec_r[gs], fw_r[gs],
             u_m_r[gs], u_em_r[gs], g_m_r[gs], g_em_r[gs], r_m_r[gs],
             r_em_r[gs], i_m_r[gs], i_em_r[gs], z_m_r[gs], z_em_r[gs],
             f395_m_r[gs], f395_em_r[gs], f410_m_r[gs], f410_em_r[gs],
             f430_m_r[gs], f430_em_r[gs], f515_m_r[gs], f515_em_r[gs],
             f660_m_r[gs], f660_em_r[gs], f861_m_r[gs], f861_em_r[gs]),
            header_cat)
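A minimal usage sketch, assuming the imports quoted in the docstring above (useful as U, numpy as N, jplus_calib_tools) are available at module level; the catalogue path is the one given in the docstring:

catalog = '/Users/albertomolino/jplus_data_download/SV02_March07/SV02_March07.clean.cat'
select_galaxies_JPLUS_catalog(catalog)  # writes one <catalog>.<tile>.gal.cat file per tile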
Example #2
def get_SExt_assoc_files(pepe):
    """
    It creates the associated catalogues with
    the detections to be included in the analysis.
    """

    for ii in range(7):
        for jj in range(4):
            for kk in range(4):
                cat = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
                    ii + 2, ii + 2, jj + 1, kk + 1)
                if os.path.exists(cat):
                    ids = U.get_str(cat, 0)
                    x, y, ar, ra, dec, mm = U.get_data(cat,
                                                       (6, 7, 8, 4, 5, 65))
                    nameout = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.coo' % (
                        ii + 2, ii + 2, jj + 1, kk + 1)
                    good = U.less_equal(abs(mm), 23.0)
                    ids = U.compress(good, (ids))
                    x, y, ar, ra, dec, mm = U.multicompress(
                        good, (x, y, ar, ra, dec, mm))
                    ne = len(x)
                    fileout = open(nameout, 'w')
                    fileout.write('#  X  Y  AREA  ID  RA  DEC  F814W  \n')
                    print 'Analyzing ', cat
                    for ss in range(ne):
                        linea = '%.3f   %.3f   %i   %s    %f   %f   %.2f  \n' % (
                            x[ss], y[ss], ar[ss], ids[ss], ra[ss], dec[ss],
                            mm[ss])
                        fileout.write(linea)
                    fileout.close()
Example #3
def get_PDZerrDistribution(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs.
---
hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
bpzfile  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
columns  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
    
    """
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))
    good = N.greater(abs(mo), 17.) * N.less(abs(mo), 25.)
    # good = N.greater(abs(mo),22.)*N.less(abs(mo),23.)
    ids, zb, zs, mo = U.multicompress(good, (ids, zb, zs, mo))
    ng = len(ids)

    # Reading the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    pdz = pdzo[good, :, :]
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    delta_z_pdzs = N.zeros(nz - 1)

    # Computing the z error distr. function
    # based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        delta_z_pdzs += U.match_resol(zz - zb[ii], pdz_mot, basez2b)

    plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    plt.subplot(211)
    plt.plot(basez2b, a1 / float(sum(a1)), 'b-', lw=12, alpha=0.6)
    plt.plot(basez2b,
             delta_z_pdzs / float(sum(delta_z_pdzs)),
             'r-',
             lw=5,
             alpha=0.9)
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.ylabel('P(z)', size=20, labelpad=+1)
    plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
    plt.subplot(212)
    resi = 2
    plt.plot(
        basez2b[::resi],
        abs((a1[::resi] / float(sum(a1))) -
            (delta_z_pdzs[::resi] / float(sum(delta_z_pdzs)))), 'k-')
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.xlabel('$\delta_{z}$', size=30)
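The quantity histogrammed above, delta_z = (zb - zs) / (1 + zs), is the usual photo-z error. A common single-number summary of its scatter is the normalized median absolute deviation; a minimal sketch is given below (the U.std_mad helper used in later examples is assumed, not shown, to do something equivalent):

import numpy as N

def sigma_nmad(delta_z):
    """Normalized MAD of the photo-z errors; robust to catastrophic outliers."""
    med = N.median(delta_z)
    return 1.4826 * N.median(N.abs(delta_z - med))

# e.g. sigma_nmad((zb - zs) / (1. + zs))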
Example #4
def numberdensity_2(C1,C2):
    """
    Same as the other versions, but with the ranges fixed manually.
    USAGE:  matrix,axis2,axis1 = numberdensity_2(V-B, R-I)
    """
    axis1  = N.arange(0.0,0.5,0.0025) #SPLUS FWHM mosaic
    axis2  = N.arange(-0.3,0.3,0.0025)  #SPLUS FWHM mosaic
    matrix = N.zeros((len(axis1),len(axis2)),float)
    for ii in range(len(axis1)-1):
        for jj in range(len(axis2)-1):
            good = N.greater_equal(C1,axis1[ii]) * N.less_equal(C1,axis1[ii+1])
            good *= N.greater_equal(C2,axis2[jj]) * N.less_equal(C2,axis2[jj+1])
            value1,value2 = U.multicompress(good,(C1,C2))
            number = len(value1)
            matrix[ii,jj] = number
    return matrix,axis2,axis1
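A hedged alternative sketch: the same counts matrix can be built in a single call with numpy.histogram2d, assuming C1 and C2 are 1-D arrays. The edges mirror the hard-coded ranges above; note that histogram2d uses half-open bins and returns a (len(axis1)-1, len(axis2)-1) matrix, so counts exactly on bin edges and the trailing row/column may differ slightly from the double loop:

import numpy as N

def numberdensity_2_fast(C1, C2):
    axis1 = N.arange(0.0, 0.5, 0.0025)   # SPLUS FWHM mosaic
    axis2 = N.arange(-0.3, 0.3, 0.0025)  # SPLUS FWHM mosaic
    matrix, _, _ = N.histogram2d(C1, C2, bins=(axis1, axis2))
    return matrix, axis2, axis1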
Example #5
def runZPcal_catalogue(reference, frame, final):
    """
    ----
filter_ref_cat,alig_frame_cat,alig_cal_frame_cat
    """
    plots = 1
    data2 = C.loaddata(frame)  # Loading the whole catalog2 content.
    head2 = C.loadheader(frame)  # Loading the original header2.
    pos_mags = 12  # ([12,20,21,22])

    mag_r = U.get_data(reference, 12)
    mag_f = U.get_data(frame, 12)
    # good_sample = U.greater_equal(mag_r,16.) * U.less_equal(mag_r,21.5)
    good_sample = U.greater_equal(mag_r, 16.) * U.less_equal(mag_r, 19.)
    mag_r2, mag_f2 = U.multicompress(good_sample, (mag_r, mag_f))
    offset = U.mean_robust(mag_f2 - mag_r2)

    if plots:
        plt.figure(11, figsize=(12, 9), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(mag_r, (mag_f - mag_r - offset), 'ko', ms=10, alpha=0.1)
        plt.xlim(16, 25)
        plt.ylim(-5, 5.)
        plt.xlabel('AB', size=25)
        plt.ylabel('Mf-Mr', size=25)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
        plt.legend(['Offset: %.4f' % (offset)], loc='upper right', numpoints=1)
        plt.title(A.getfilename(frame), size=15)
        plt.grid()
        figurename = final[:-3] + 'png'
        print 'figurename: ', figurename
        plt.savefig(figurename, dpi=100)
        plt.close()

    # Here it saves the offset in an ASCII file
    fileout = open(final[:-3] + 'txt', 'w')
    linea = '%s %.5f \n' % (final, offset)
    fileout.write(linea)
    fileout.close()

    # The offset is only applied to m!=99. magnitudes.
    new_mags = U.where(abs(mag_f) < 99, mag_f - offset, mag_f)
    data2[:, pos_mags] = new_mags
    C.savedata(data2, final, dir="", header=head2)
    print ' '
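U.mean_robust is not shown in this listing; a minimal stand-in, assuming it is essentially an iteratively sigma-clipped mean, could look like this:

import numpy as N

def mean_robust_sketch(values, n_sigma=3., n_iter=3):
    """Iteratively clip points beyond n_sigma and return the mean of the rest."""
    vals = N.asarray(values, dtype=float)
    for _ in range(n_iter):
        mu, sd = vals.mean(), vals.std()
        keep = N.abs(vals - mu) < n_sigma * sd
        if keep.all() or not keep.any():
            break
        vals = vals[keep]
    return vals.mean()

# offset = mean_robust_sketch(mag_f2 - mag_r2)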
Example #6
# Root to data
mainroot = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/'
root2cats = mainroot + 'splus_cats_NGSL/'
#bpz_cat = root2cats + 'COSMOSeB11new_recal/master.STRIPE82_Photometry.m21_COSMOSeB11new_recal_redu.bpz'
#bpz_cat = root2cats + 'PriorSM/masterBPZ_PriorSM.bpz'
bpz_cat = root2cats + 'COSMOSeB11new_recal/PriorSM/masterBPZ_PriorSM.bpz'


# Reading data
zb,mo,od,zs,chi2,tb = U.get_data(bpz_cat,(1,10,5,9,8,4))

# Doing some cleaning
good = N.less(chi2,30) * N.greater_equal(mo,14)
good *= N.greater_equal(od,0.01)
good *= N.less_equal(zs,0.5)
zb,mo,od,zs,chi2,tb = U.multicompress(good,(zb,mo,od,zs,chi2,tb))

# Defining three samples.
mag_1 = 17
mag_2 = 19
mag_3 = 21
gm1 = N.less_equal(mo,mag_1) #* N.greater(tb,4.5)
gm2 = N.less_equal(mo,mag_2)
gm3 = N.less_equal(mo,mag_3)

# Defining basis for histograms
base_histo = N.arange(-0.2,0.2,0.01)
base_histo_2 = [-0.1,-0.05,0.0,0.05,0.1]
base_histo_3 = [-0.2,-0.1,0.0,0.1,0.2]
base_line = N.arange(0.,1.,0.1)
base_3 = N.arange(0.,0.6,0.1)
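A minimal sketch of how these nested samples and histogram bases might be used, assuming the photo-z error definition used throughout these examples, dz = (zb - zs) / (1 + zs):

dz = (zb - zs) / (1. + zs)
h1, _ = N.histogram(dz[gm1], base_histo)  # brightest sample, mo <= 17
h2, _ = N.histogram(dz[gm2], base_histo)  # mo <= 19
h3, _ = N.histogram(dz[gm3], base_histo)  # mo <= 21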
Example #7
import sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import numpy as N
import useful as U
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm

root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/'
root += 'data_quality/photometry/SDSSSPLUS/'
u_cat = root + 'master_SPLUS_STRIPE82_to_SDSS_Ivezick_stars_Uband.cat'
griz_cat = root + 'master_SPLUS_STRIPE82_to_SDSS_Ivezick_stars_griz.cat'

s2nu, u_sp, u_sd = U.get_data(u_cat, (2, 3, 4))
delta_u = (u_sp - u_sd)
good_u = N.greater_equal(s2nu, 5.) * N.less_equal(abs(delta_u), 2.)
good_u *= N.less_equal(u_sd, 22.)
u_sp, u_sd, delta_u = U.multicompress(good_u, (u_sp, u_sd, delta_u))

s2n, g_sp, r_sp, i_sp, z_sp = U.get_data(griz_cat, (2, 3, 4, 5, 6))  # SPLUS
g_sd, r_sd, i_sd, z_sd = U.get_data(griz_cat, (7, 8, 9, 10))  # SDSS
delta_g = (g_sp - g_sd)
delta_r = (r_sp - r_sd)
delta_i = (i_sp - i_sd)
delta_z = (z_sp - z_sd)

good_griz = N.greater_equal(s2n, 5.) * N.less_equal(abs(delta_g), 2.)
good_griz *= N.less_equal(abs(delta_r), 2.) * N.less_equal(abs(delta_i), 2.)
good_griz *= N.less_equal(abs(delta_z), 2.)
g_sp, r_sp, i_sp, z_sp = U.multicompress(good_griz, (g_sp, r_sp, i_sp, z_sp))
g_sd, r_sd, i_sd, z_sd = U.multicompress(good_griz, (g_sd, r_sd, i_sd, z_sd))
delta_g, delta_r = U.multicompress(good_griz, (delta_g, delta_r))
delta_i, delta_z = U.multicompress(good_griz, (delta_i, delta_z))
Example #8
def figura33(lista):
    """

    I'm using the stellar classification from the version_e.
    ----
import alhambra_completeness as alhc
lista = '/Volumes/amb22/catalogos/reduction_v4d/globalcats/lista.list'
alhc.figura33(lista)

    """
    blue = 0
    red = 1
    cats = U.get_str(lista, 0)
    cats2 = U.get_str(lista, 1)
    nc = len(cats)
    dx = 0.2
    dy = 0.4
    nxbins = 4
    nybins = 2
    ods = 0.05
    mmin = 16.0
    mmax = 23.75
    zbmin = 0.0001
    zbmax = 1.4
    Mmin = -24
    Mmax = -17
    if red:
        Tbmin = 1  # 7.
        Tbmax = 5  # 11.
        resolmag = 0.2  # 0.2
        resolz = 0.05
    if blue:
        Tbmin = 7.
        Tbmax = 11.
        resolmag = 0.2
        resolz = 0.05

    resol = 0.025
    areas = ([0.45, 0.47, 0.23, 0.24, 0.47, 0.47, 0.46, 2.79])

    plt.figure(111, figsize=(21.5, 11.5), dpi=70, facecolor='w', edgecolor='k')
    ss = 0
    for jj in range(nybins):
        for ii in range(nxbins):
            # Reading data from catalogs.
            mo, zb, tb, odds, m814 = U.get_data(cats[ss], (81, 72, 75, 76, 62))
            sf = U.get_data(cats2[ss], 71)
            # mo,zb,tb,sf,odds,m814 = U.get_data(cats[ss],(81,72,75,71,76,62))
            g = U.greater_equal(abs(m814), mmin) * U.less_equal(
                abs(m814), mmax)
            # g* = U.greater_equal(odds,ods)
            g *= U.greater_equal(tb, Tbmin) * U.less_equal(tb, Tbmax)
            g *= U.less_equal(sf, 0.8)
            yy = -0.014 * m814 + 0.38
            g *= U.greater(odds, yy)
            g *= U.less_equal(mo, Mmax + resol) * U.greater(mo, Mmin - resol)
            g *= U.greater(zb, zbmin) * U.less_equal(zb, zbmax)
            mo, zb, tb, odds = U.multicompress(g, (mo, zb, tb, odds))
            print 'dimension', len(mo)
            # Plotting density.
            # cuadrado = plt.axes([.1+(ii*dx),.1+((nybins-jj-1)*dy),dx,dy])
            if ii == nxbins - 1:
                cuadrado = plt.axes([
                    .1 + (ii * dx), .1 + ((nybins - jj - 1) * dy),
                    dx + (dx * 0.2), dy
                ])
            else:
                cuadrado = plt.axes(
                    [.1 + (ii * dx), .1 + ((nybins - jj - 1) * dy), dx, dy])
            matrix, axis2, axis1 = rs.CC_numberdensity_contour_zvolume(
                zb, mo, resolz, resolmag, 1)
            if blue:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-11.,
                             vmax=-7.)  # blue galaxies
            if red:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-12.,
                             vmax=-7.65)  # red galaxies

            if ii == nxbins - 1:
                aa = plt.colorbar(pad=0., format='%.1f')
                aa.set_label('Log. Density [N/Mpc$^{3}$/deg$^{2}$]', size=18)
            if jj != nybins - 1: plt.setp(cuadrado, xticks=[])
            if ii != 0: plt.setp(cuadrado, yticks=[])
            if jj == nybins - 1:
                plt.xlabel('M$_{B}$', size=27)
                plt.xticks(fontsize=17)
            if ii == 0:
                plt.ylabel('redshift', size=28)
                plt.yticks(fontsize=17)

            # plotting axis manually
            base1 = U.arange(Mmin, Mmax + 1., 1.)
            base2 = U.arange(0, zbmax + (2. * resol), resol)
            dim1 = len(base1)
            dim2 = len(base2)
            for rr in range(dim1):
                plt.plot(base2 * 0. + base1[rr],
                         base2,
                         'k--',
                         linewidth=1.,
                         alpha=0.25)
            for rr in range(dim2):
                plt.plot(base1,
                         base1 * 0. + base2[rr],
                         'k--',
                         linewidth=1.,
                         alpha=0.25)

            # plt.grid()
            plt.ylim(zbmin + 0.0001, zbmax - 0.001)
            plt.xlim(Mmin + 0.0001, Mmax - 0.0001)
            if ss == 7: labelleg = 'Global'
            else: labelleg = 'A%i' % (ss + 2)
            xypos = (Mmax - 1.6, zbmax - 0.18)
            if ss == 7: xypos = (Mmax - 3.5, zbmax - 0.18)
            plt.annotate(labelleg, xy=xypos, fontsize=40, color='black')
            ss += 1

    plt.savefig('completeness.alhambra.png', dpi=200)
Example #9
def get_PDZerrDistribution_byMagnitudes(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs.
---
import alhambrahdf5 as AH
#hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.hdf5'
#bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.bpz'
#columns = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.columns'
hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
bpzfile  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
columns  = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
basez2b,delta_z_peaks,delta_z_pdzs = AH.get_PDZerrDistribution_byMagnitudes(hdf5file,bpzfile,columns) 
    
    """
    basem = N.arange(18, 26, 2)
    # basem = N.arange(18,25,2)
    nm = len(basem)
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))
    # Reading the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)

    # Defining the final outputs.
    delta_z_pdzs = N.zeros((nm - 1, nz - 1), float)
    delta_z_peaks = N.zeros((nm - 1, nz - 1), float)

    for ii in range(nm - 1):
        good = N.greater_equal(mo, basem[ii]) * N.less_equal(mo, basem[ii + 1])
        idr, zbr, zsr, mor = U.multicompress(good, (ids, zb, zs, mo))
        ng = len(idr)
        pdz = pdzo[good, :, :]

        # Computing the z error distr. function
        # based on peak values.
        temporal_delta_z_peaks = (zbr - zsr) / (1. + zsr)
        a1, a2 = N.histogram(temporal_delta_z_peaks, basez2)
        delta_z_peaks[ii, :] = a1[:]

        for jj in range(ng):
            pdz_mot = U.sum(pdz[jj, :, :], axis=1)
            delta_z_pdzs[ii, :] += U.match_resol(zz - zbr[jj], pdz_mot,
                                                 basez2b)

    # plt.figure(12, figsize = (8.5,10.),dpi=80, facecolor='w', edgecolor='k')
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(basez2b,a1/float(sum(a1)),'b-',lw=12,alpha=0.6)
    # plt.plot(basez2b,delta_z_pdzs/float(sum(delta_z_pdzs)),'r-',lw=5,alpha=0.9)
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.ylabel('P(z)',size=20,labelpad=+1)
    # plt.legend(['peaks','pdfs'],loc='upper left',fontsize=20)
    # plt.subplot(212)
    # resi = 2
    # plt.plot(basez2b[::resi],abs((a1[::resi]/float(sum(a1)))-(delta_z_pdzs[::resi]/float(sum(delta_z_pdzs)))),'k-')
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.xlabel('$\delta_{z}$',size=30)

    return basez2b, delta_z_peaks, delta_z_pdzs
Example #10
def flagging_dobledetections(cat1,cat2):
    """
    This appends an extra column (one to each input catalog)
    indicating whether a detection is duplicated and has the lower S/N
    of the two.
    Sources flagged as 1 are those detections to be excluded when combining
    both catalogs into a single one.
--------
import alhambra_overlap as alhov
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
alhov.flagging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))
    flag1 = U.zeros(ne1)
    flag2 = U.zeros(ne2)
    
    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # We use now X,Y instead RA,Dec
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to its S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           
           # Two new IDs (finalid1 & finalid2) are generated with 
           # the final elements to be included in the output catalog.
           for hh1 in range(ne1):
               if id1[hh1] in idr1b:
                  flag1[hh1] = 1
                  
           for hh2 in range(ne2):
               if id2[hh2] in idr2b:
                  flag2[hh2] = 1

           # A new smaller catalog will be created containing specz info as an extra column.
           outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
           outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.appendcol(cat1,flag1,'Flag2Detected',outcat1)
           ap.appendcol(cat2,flag2,'Flag2Detected',outcat2)

           # Renaming files
           ap.renamefile(cat1,cat1+'.old.cat')
           if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
           ap.renamefile(cat2,cat2+'.old.cat')
           if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)           
           
    else:
       print 'No common sources in between the catalogs'
       # A new smaller catalog will be created containing specz info as an extra column.
       outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
       outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
       print 'outcat1',outcat1
       print 'outcat2',outcat2
       ap.appendcol(cat1,flag1*0,'Flag2Detected',outcat1)
       ap.appendcol(cat2,flag2*0,'Flag2Detected',outcat2)
       
       # Renaming files
       ap.renamefile(cat1,cat1+'.old.cat')
       if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
       ap.renamefile(cat2,cat2+'.old.cat')
       if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)   
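The S/N tie-break at the core of the function above can be written compactly; a hedged vectorized sketch, assuming s2n_1 and s2n_2 hold the S/N values of already-matched pairs (flag = 1 marks the member of each pair with the lower S/N, i.e. the one to exclude):

import numpy as N

def flag_lower_s2n(s2n_1, s2n_2):
    flag_1 = N.where(s2n_1 < s2n_2, 1, 0)
    flag_2 = 1 - flag_1
    return flag_1, flag_2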
Example #11
                    final_pdf_blue = pb
                    final_pdf_global = pg
                    n_gals = ng
                else:
                    final_pdf_red += pr
                    final_pdf_blue += pb
                    final_pdf_global += pg
                    n_gals += ng

                if odds_cut:
                    gm = N.less_equal(mo, mag_bins[ii])
                    gm *= N.greater(odd, 0.89)
                else:
                    gm = N.less_equal(mo, mag_bins[ii])

                idr, zbr = U.multicompress(gm, (ids, zb))
                #print 'len(idr)',len(idr)
                for ff in range(len(idr)):
                    if ii < 1:
                        zbs_1.append(zbr[ff])
                    elif ii == 1:
                        zbs_2.append(zbr[ff])
                    else:
                        zbs_3.append(zbr[ff])
                    kk += 1

        #print 'kk,ng',kk,n_gals
        #final_pdf_red *= n_gals
        #final_pdf_blue *= n_gals
        #final_pdf_global *= n_gals
        # Saving P(z|R<Ri)
Example #12
out_file.write('#  14 F0861_auto  \n')
out_file.write('#  15 z_auto  \n')
out_file.write('# ID RA Dec U_auto F0378_auto F0395_auto '
               'F0410_auto F0430_auto G_auto F0515_auto '
               'R_auto F0660_auto I_auto F0861_auto z_auto \n')

for ii in range(n_cats):
    print 'reading catalogue %i '%(ii+1)
    ids,ra,dec,u,f378,f395,f410,f430 = U.get_data(cats[ii],(0,1,2,15,24,33,42,51))
    g,f515,r,f660,i,f861,z,ps = U.get_data(cats[ii],(60,69,78,87,96,105,114,132))
    #Selecting good stars
    good  = N.greater_equal(ps,0.9)
    good *= N.less_equal(abs(u),30) * N.less_equal(abs(g),30)
    good *= N.less_equal(abs(r),30) * N.less_equal(abs(i),30)
    good *= N.less_equal(abs(z),30) * N.less_equal(abs(f378),30)
    good *= N.less_equal(abs(f395),30) * N.less_equal(abs(f410),30)
    good *= N.less_equal(abs(f430),30) * N.less_equal(abs(f515),30)
    good *= N.less_equal(abs(f660),30) * N.less_equal(abs(f861),30)
    # Compressing.
    ids,ra,dec,u,f378,f395,f410,f430 = U.multicompress(good,(ids,ra,dec,u,f378,f395,f410,f430))
    g,f515,r,f660,i,f861,z = U.multicompress(good,(g,f515,r,f660,i,f861,z))
    # Writing data into new catalogue.
    n_stars = len(ids)
    for ss in range(n_stars):
        linea = '%i  %.4f  %.4f  %.2f  '%(ids[ss],ra[ss],dec[ss],u[ss])
        linea += '%.2f  %.2f  %.2f  %.2f  '%(f378[ss],f395[ss],f410[ss],f430[ss])
        linea += '%.2f  %.2f  %.2f  %.2f  '%(g[ss],f515[ss],r[ss],f660[ss])
        linea += '%.2f  %.2f  %.2f  '%(i[ss],f861[ss],z[ss])
        out_file.write(linea+'\n')

out_file.close()
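# A hypothetical alternative to the manual row-formatting loop above, assuming the
# compressed columns are 1-D numpy arrays of equal length (the output name is made up):
#
#   data = N.column_stack((ids, ra, dec, u, f378, f395, f410, f430,
#                          g, f515, r, f660, i, f861, z))
#   N.savetxt('splus_stars_sketch.cat', data,
#             fmt='%i  %.4f  %.4f' + '  %.2f' * 12)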
base_z2 = base_z[:-1] + ((base_z[1] - base_z[0]) / 2.)
base_m2 = base_m[:-1] + ((base_m[1] - base_m[0]) / 2.)
n_z = len(base_z)
n_m = len(base_m)
valor_auto = N.zeros((n_m, n_z), float)

alphas = [0.2, 0.4, 0.6, 0.8, 1.0]

#AUTO apertures
zb, zs, mo = U.get_data(master_bpz_auto, (1, 9, 10))
dz = (zb - zs) / (1. + zs)
satur = N.greater(mo, 14)

for ii in range(n_m):
    good_1 = N.less_equal(mo, base_m[ii])
    zs_r, dz_r = U.multicompress(good_1, (zs, dz))
    for jj in range(n_z):
        #good = N.greater_equal(zs_r,base_z[jj])
        good = N.less_equal(zs_r, base_z[jj])
        linea = 'm<%.2f,' % (base_m[ii])
        linea += 'z<%.2f' % (base_z[jj])
        linea += ': %i' % (len(zs_r[good]))
        if len(zs_r[good]) > 100:
            valor_auto[ii, jj] = U.std_mad(dz_r[good])
        else:
            valor_auto[ii, jj] = -1.
        linea += ', dz/1+z: %.3f' % (valor_auto[ii, jj])
        print linea
        #pausa = raw_input('paused')

# Plot
Example #14
def overlaygalaxies2(image, cat, posx, posy, cond, shape, legend, save,
                     outfile):
    """
    Similar to version 1, but the displayed label is no longer the catalogue ID;
    it comes from an extra variable (legend).
    This serves to create a color image with just stars.
============
import alhambra_webpage as alhw
import useful as U
image = '/Volumes/amb22/imagenes/f02/color_images/f02p01_OPTICAL_1.png'
cat = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
posid = 0
posx = 3
posy = 4
sf = U.get_data(cat,71)
cond = U.greater_equal(sf,0.7)
shape = 'circle'
save = 'yes'
outfile = '/Volumes/amb22/catalogos/reduction_v4e/stars/f02/f02p01_stars_1.png'
alhw.overlaygalaxies2(image,cat,posx,posy,cond,shape,legend,save,outfile)

    """
    colorfile = image
    im = Image.open(colorfile)
    imsize = nx, ny = im.size
    stamp = im.crop((0, 0, nx, ny))
    draw = ImageDraw.Draw(stamp)

    x, y = U.get_data(cat, (posx, posy))
    x, y, legend = U.multicompress(cond, (x, y, legend))

    for ii in range(len(x)):
        xx = x[ii]
        yy = y[ii]
        shapesize = 40  # aa * 1.05
        colores = (255, 255, 1)
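        # Note: PIL images have their origin at the top-left corner, while the
        # catalogue pixel coordinates assume a bottom-left origin, hence the
        # (ny - yy) flip used when drawing the ellipse and the text label.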

        if shape != 'None':
            if shape == 'circle':
                draw.ellipse((xx - shapesize, ny - yy - shapesize,
                              xx + shapesize, ny - yy + shapesize),
                             fill=None)
            elif shape == 'crosshair':
                draw.line((xx + shapesize, yy, xx + shapesize - 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx - shapesize, yy, xx - shapesize + 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx, yy + shapesize, xx, yy + shapesize - 10),
                          fill=None,
                          width=3)
                draw.line((xx, yy - shapesize, xx, yy - shapesize + 10),
                          fill=None,
                          width=3)
            elif shape == 'rectangle':
                # Rectangle centred on the source, matching the circle geometry.
                draw.rectangle((xx - shapesize, ny - yy - shapesize,
                                xx + shapesize, ny - yy + shapesize),
                               fill=colores)  # fill=None)
            else:
                print 'Shape not found! It will not be overlaid...'

        label = '%.2f' % (legend[ii])
        draw.text((xx - (shapesize / 2.), ny - yy - (shapesize / 2.) - 15),
                  label,
                  fill=(255, 255, 1))

    if save == 'yes':
        if outfile != 'None':
            stamp.save(outfile)
        else:
            stamp.save('imcutoff.png')
Example #15
def globalimage_zb(image, cat, posx, posy, posarea, poszb, shape, save,
                   outfile):
    """
============
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/emss2137/emss2137.png'
cat = '/Volumes/amb2/SUBARU/emss2137/catalogs/MS2137_Subaru.bpz.2.cat'
posx = 3
posy = 4
posarea = 5
poszb = 17
shape = 'circle'
save = 'yes'
outfile = '/Users/albertomolino/Desktop/emss2137/emss2137_22.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
-----------------
import alhambra_photools
from alhambra_photools import *
import alhambra_webpage
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/macs1206/macs1206.color.png'
cat = '/Users/albertomolino/Desktop/UDF/Molino12/catalogs/ColorPro/macs1206_UDFconf_NIR_July2012_ISO_RS.cat'
posx = 3
posy = 4
shape = 'circle'
save = 'yes'
posarea = 5
poszb = 0
outfile = '/Users/albertomolino/Desktop/macs1206/macs1206.color.RS.purged.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
--------
import alhambra_photools
from alhambra_photools import *
import alhambra_webpage
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/rxj2248/HST/rxj2248_acs.png'
cat = '/Users/albertomolino/Desktop/rxj2248/HST/catalogs/rxj2248_IR_RedSeq.cat'
posx = 3
posy = 4
shape = 'circle'
save = 'yes'
posarea = 5
poszb = 0
outfile = '/Users/albertomolino/Desktop/rxj2248/HST/rxj2248_acs.RS.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
--------

    """
    colorfile = image
    im = Image.open(colorfile)
    imsize = nx, ny = im.size
    stamp = im.crop((0, 0, nx, ny))
    draw = ImageDraw.Draw(stamp)

    try:
        x, y, area, zb = U.get_data(cat, (posx, posy, posarea, poszb))
        sf = U.get_data(cat, 74)
        # good = greater(zb,0.28) * less(zb,0.34)
        good = U.greater(zb, 0.001) * U.less(sf, 0.7)
        x, y, area, zb = U.multicompress(good, (x, y, area, zb))
    except:
        print 'Impossible to read the data from catalog. Check it out!!'

    for ii in range(len(x)):
        xx = x[ii]
        yy = y[ii]
        aa = area[ii]
        zzb = zb[ii]
        zbval = ' %.2f ' % (zzb)
        # print 'x,y',xx,yy
        # print 'zbval',zbval
        # print 'ii',ii
        # dx = dy = 1.5 * aa
        shapesize = 20  # aa * 1.05
        # print 'shapesize',shapesize
        # dxo = dyo = 0

        colores = (255, 255, 1)
        # if zzb < 0.1  : colores = (255,1,1)   # red
        # elif zzb >= 0.1 and zzb < 0.3 : colores = (255,255,1) # yellow
        # elif zzb >= 0.3 and zzb < 1.  : colores = (1,255,1)   # Green
        # elif zzb >= 1.  and zzb < 3.  : colores = (1,255,255) # Blue
        # else: colores = (255,1,255) # Purple

        if shape != 'None':
            if shape == 'circle':
                draw.ellipse((xx - shapesize, ny - yy - shapesize,
                              xx + shapesize, ny - yy + shapesize),
                             fill=None)
            elif shape == 'crosshair':
                draw.line((xx + shapesize, yy, xx + shapesize - 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx - shapesize, yy, xx - shapesize + 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx, yy + shapesize, xx, yy + shapesize - 10),
                          fill=None,
                          width=3)
                draw.line((xx, yy - shapesize, xx, yy - shapesize + 10),
                          fill=None,
                          width=3)
            elif shape == 'rectangle':
                # Rectangle centred on the source, matching the circle geometry.
                draw.rectangle((xx - shapesize, ny - yy - shapesize,
                                xx + shapesize, ny - yy + shapesize),
                               fill=colores)  # fill=None)
            else:
                print 'Shape not found! It will not be overlaid...'

        draw.text((xx - (shapesize / 2.), ny - yy - (shapesize / 2.)),
                  zbval,
                  fill=(255, 255, 1))
        # draw.text((xx-(1.5*shapesize),ny-yy-(2.*shapesize)),zbval,fill=(255,255,255))
        # # draw.text((xx-(1.5*shapesize),ny-yy-(2.*shapesize)),zbval,fill=colores)

    if save == 'yes':
        if outfile != 'None':
            stamp.save(outfile)
        else:
            stamp.save('imcutoff.png')
Example #16
def overlaygalaxies(image,
                    cat,
                    posid,
                    posx,
                    posy,
                    cond,
                    shape,
                    save,
                    outfile,
                    extra=None):
    """
    This serves to create a color image with just stars.
============
import alhambra_webpage as alhw
import useful as U
image = '/Volumes/amb22/imagenes/f02/color_images/f02p01_OPTICAL_1.png'
cat = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
posid = 0
posx = 3
posy = 4
sf = U.get_data(cat,71)
cond = U.greater_equal(sf,0.7)
shape = 'circle'
save = 'yes'
outfile = '/Volumes/amb22/catalogos/reduction_v4e/stars/f02/f02p01_stars_1.png'
alhw.overlaygalaxies(image,cat,posid,posx,posy,cond,shape,save,outfile)

    """
    colorfile = image
    im = Image.open(colorfile)
    imsize = nx, ny = im.size
    stamp = im.crop((0, 0, nx, ny))
    draw = ImageDraw.Draw(stamp)

    ids, x, y = U.get_data(cat, (posid, posx, posy))
    ids, x, y = U.multicompress(cond, (ids, x, y))

    for ii in range(len(x)):
        iid = ids[ii]
        xx = x[ii]
        yy = y[ii]
        shapesize = 10  # aa * 1.05
        colores = (255, 255, 1)

        if shape != 'None':
            if shape == 'circle':
                draw.ellipse((xx - shapesize, ny - yy - shapesize,
                              xx + shapesize, ny - yy + shapesize),
                             fill=None)
            elif shape == 'crosshair':
                draw.line((xx + shapesize, yy, xx + shapesize - 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx - shapesize, yy, xx - shapesize + 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx, yy + shapesize, xx, yy + shapesize - 10),
                          fill=None,
                          width=3)
                draw.line((xx, yy - shapesize, xx, yy - shapesize + 10),
                          fill=None,
                          width=3)
            elif shape == 'rectangle':
                # Rectangle centred on the source, matching the circle geometry.
                draw.rectangle((xx - shapesize, ny - yy - shapesize,
                                xx + shapesize, ny - yy + shapesize),
                               fill=colores)  # fill=None)
            else:
                print 'Shape not found! It will not be overlaid...'

        if extra is None:
            label = 'ID:814%i' % (iid)
        else:
            uno = int(int(extra) + iid)
            label = 'ID:%i' % (uno)
        draw.text((xx - (shapesize / 2.), ny - yy - (shapesize / 2.) - 15),
                  label,
                  fill=(255, 255, 1))

    if save == 'yes':
        if outfile != 'None':
            stamp.save(outfile)
        else:
            stamp.save('imcutoff.png')
Example #17
# Creating the file where to save the data.
file_out_name = final_path + 'psf.Rband_centeref.cat'
file_out = open(file_out_name, 'w')
file_out.write('# x y fwhm \n')

# Starting the game...
for sss in range(n_cats):
    field = os.path.basename(cats[sss])[9:-15]
    master_cat = root_to_cats + 'STRIPE82-%s_Photometry.cat' % (field)  ##
    print 'reading catalog %i out of %i ' % (sss + 1, n_cats)
    x, y, fwhm, mr = U.get_data(master_cat, (3, 4, 8, 84))
    fwhm_master, stars = sct.get_seeing_from_data_pro(fwhm, mr)
    # Reading FWHM from Individual R images.
    ind_catalog = root_to_ind + 'sex_STRIPE82-%s_R_swp.cat' % (field)  ##
    fwhm_indiv = U.get_data(ind_catalog, 6)
    x_stars, y_stars, fw_stars = U.multicompress(stars, (x, y, fwhm_indiv))
    center_pos_x = N.less(abs(x_stars - N.mean(x_stars)), 1000.)
    center_pos_y = N.less(abs(y_stars - N.mean(y_stars)), 1000.)
    fw_stars_center = fw_stars[center_pos_x * center_pos_y]
    for ii in range(len(x_stars)):
        linea = '%i  %i ' % (x_stars[ii], y_stars[ii])
        mean_value_center = U.mean_robust(fw_stars_center)
        #valor = (fw_stars[ii]*fw_stars[ii])-(mean_value_center*mean_value_center)
        valor = (fw_stars[ii] - mean_value_center)
        linea += '%.2f \n' % (valor)
        file_out.write(linea)
file_out.close()

if os.path.exists(file_out_name):
    xx, yy, ff = U.get_data(file_out_name, (0, 1, 2))
Example #18
n_models = len(sed_models)

# Reading the S-PLUS/S82 data (spec-z cat)
#bpz_cat = root_to_cats+'bpz/'+'master.STRIPE82_Photometry.m21.bpz'
if splus:
    photo_cat = root_to_cats + 'cat/' + 'master.STRIPE82_Photometry.m21.cat'
    u, du, g, dg, r, dr, i, z, dz, zs = U.get_data(
        photo_cat, (15, 16, 60, 61, 78, 79, 96, 114, 115, 125))
else:
    photo_cat = '/Users/albertomolino/Postdoc/T80S_Pipeline/targets/SDSS_S82/'
    photo_cat += 'catalogues/stripe82_spz_extcorr.cat'
    u, g, r, i, z, zs = U.get_data(photo_cat, (4, 6, 8, 10, 12, 3))

clean_data = N.less_equal(abs(u - g), 5.) * N.less_equal(abs(g - z), 5.)
#clean_data *= N.less_equal(du,0.1) * N.less_equal(dg,0.07) * N.less_equal(dz,0.07)
u, g, r, i, z, zs = U.multicompress(clean_data, (u, g, r, i, z, zs))

# Defining colours
color_x = g - z
color_y = u - g

# Estimating dimensionality
ab_file_example = ab_path + sed_models[0][:-3] + filter_x1 + '.AB'
z_ab = U.get_data(ab_file_example, 0)
nz = len(z_ab)

# Creating matrix where to save the data.
ab_filter_x1 = N.zeros((nz, n_models), float)
ab_filter_x2 = N.zeros((nz, n_models), float)
ab_filter_y1 = N.zeros((nz, n_models), float)
ab_filter_y2 = N.zeros((nz, n_models), float)
Example #19
    nameout2 = fluxcomp[:-15] + 'magzpdist.txt'
    plt.figure(112, figsize=(8, 10.), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    mt = B.flux2mag(ft[ii])
    mob = B.flux2mag(fob[ii])
    emob = N.where(
        abs(fob[ii][:]) > 0,
        B.e_frac2mag(efob[ii][:]) / fob[ii][:], -99)
    uno = plt.axes([0.15, .675, 0.75, 0.265])
    dm = mt[g] - mob[g]
    mmo = mo[g]
    emmob = emob[g]
    mtt = mt[g]
    mobb = mob[g]
    g2 = N.less(abs(dm), 0.3)
    dm2, mo2, emo2, mt2, mob2 = U.multicompress(g2,
                                                (dm, mmo, emmob, mtt, mobb))
    a1, a2, a3 = plt.hist(dm[g2], basem_histo, facecolor='grey', alpha=0.2)
    plt.xlim(-0.49, 0.49)
    plt.grid()
    plt.title(filters[ii][:-4], size=20)
    # plt.xlabel('mt-mob',size=20)
    plt.ylabel('Counts', size=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)

    dos = plt.axes([0.15, .1, 0.75, 0.57])
    plt.plot(dm2[::10], mo2[::10], 'bo', ms=2, alpha=0.1)
    plt.errorbar(dm2[::10],
                 mo2[::10], [emo2[::10], emo2[::10]],
                 fmt="bo",
                 ms=2,
Example #20
def get_PDZerrDistribution_byTemplates(hdf5file, bpzfile, m_max):
    """
    It returns the error distribution based on PDZs.
---
import splus_s82_hdf5_tools as to
root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
root += 'S82/Dec2017/splus_cats_NGSL/'
hdf5list = root+'hdf5.list'
bpzlist = root+'bpz/master.STRIPE82_Photometry.m21.bpz.list'
hdf5_files = U.get_str(hdf5list,0)
n_hdf5 = len(hdf5_files)
bpz_files  = U.get_str(bpzlist,0)
n_bpz = len(bpz_files)
for ii in range(n_bpz):
    name = os.path.basename(hdf5_files[ii])
    print name
    try: z,dp,df = to.get_PDZerrDistribution_byTemplates(hdf5_files[ii],bpz_files[ii],19)
    except: print 'Impossible to run on ',name

    """

    plots = 1
    # starting plots if necessary
    if plots:
        plt.figure(12,
                   figsize=(8.5, 10.),
                   dpi=80,
                   facecolor='w',
                   edgecolor='k')

    try:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 11, 12, 4, 5))
    except:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 9, 10, 4, 5))
    good = N.less_equal(mo, m_max)
    ids, zb, zs, mo, tb, odd = U.multicompress(good,
                                               (ids, zb, zs, mo, tb, odd))
    ng = len(ids)

    # Reading the PDZs...
    p = h5py.File(hdf5file, mode='r')
    #pdzo = p.get('FullProbability')
    pdz = p.get('Likelihood')
    pdz = pdz[good, :, :]
    zz = p.get('redshift')[:]
    dz = (zz[2] - zz[1]) * 100.
    basez2 = N.arange(-0.2, 0.2, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    res = 1

    # Computing the z error distr. function
    # based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    delta_z_pdzs = N.zeros(nz - 1)
    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        pdz_mot_peak = pdz_mot / float(max(pdz_mot))
        # To get rid of long tails in PDFs with low probabilities.
        pdz_mot_peak = N.where(pdz_mot_peak < 1.0e-4, 0., pdz_mot_peak)
        pdz_mot_norm = pdz_mot_peak / float(sum(pdz_mot_peak))
        pdz_mot_norm = N.where(pdz_mot_norm < 0., 0., pdz_mot_norm)
        #pdz_mot_norm  = pdz_mot/float(sum(pdz_mot))
        pdz_mot_norm_resample = U.match_resol(zz - zs[ii], pdz_mot_norm,
                                              basez2b)
        pdz_mot_norm_resample = N.where(pdz_mot_norm_resample < 0., 0.,
                                        pdz_mot_norm_resample)
        delta_z_pdzs += pdz_mot_norm_resample[:]
        """
        if plots:
           plt.clf()
           plt.subplot(121)
           peak_zb_pos = N.argmax(pdz_mot_norm[::res])
           print zz[peak_zb_pos]
           plt.plot(zz[::res]-zs[ii],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
           #plt.plot(zz[::res]-zz[peak_zb_pos],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
           plt.grid()
           plt.xlim(-0.2,0.2)
           #plt.ylim(0.001,0.1)
           plt.xlabel('$\delta_{z}$',size=30)
           plt.ylabel('P(z)',size=20,labelpad=+1)
           plt.legend(['R=%.2f''\n''T=%.1f''\n''O=%.1f'%(mo[ii],tb[ii],odd[ii])],loc='upper right')
           plt.title('zb = %.2f, zs = %.2f, dz/1+z = %.2f'%(zb[ii],zs[ii],delta_z_peaks[ii]),size=20)
           plt.subplot(122)
           plt.plot(basez2b,delta_z_pdzs,'k-',lw=5)
           plt.grid()
           plt.xlim(-0.2,0.2)
           #plt.ylim(0.001,0.1)
           plt.xlabel('$\delta_{z}$',size=30)
           plt.ylabel('P(z)',size=20,labelpad=+1)
           pausa = raw_input('press a button to continue')
        """

    # New variables to handle data easily.
    # It scales the normalized PDFs by the ng!
    norm_dz_peaks = a1 / float(sum(a1))
    norm_dz_pdfs = delta_z_pdzs / float(sum(delta_z_pdzs))

    if plots:
        plt.figure(11,
                   figsize=(8.5, 10.),
                   dpi=80,
                   facecolor='w',
                   edgecolor='k')
        plt.clf()
        #plt.subplot(212)
        plt.plot(basez2b, norm_dz_peaks, 'b-', lw=8, alpha=0.6)
        plt.plot(basez2b, norm_dz_pdfs, 'r-', lw=5, alpha=0.9)
        plt.grid()
        plt.xlim(-0.2, 0.2)
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
        plt.xlabel('$\delta_{z}$', size=30)
        plot_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.png' % (m_max)
        plt.savefig(plot_filename, dpi=80)

    # Saving data into a file.
    output_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (basez2b, norm_dz_peaks, norm_dz_pdfs),
               'z dz_peak dz_PDFs')

    return basez2b, norm_dz_peaks, norm_dz_pdfs
Example #21
def purging_dobledetections(cat1,cat2):
    """

import alhambra_overlap
from alhambra_overlap import *
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
purging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))

    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # We use now X,Y instead RA,Dec
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to its S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           print ''
           print 'Number of detections to be removed from cat1: ', dim1rr
           print 'Number of detections to be removed from cat2: ', dim2rr
           print ''
           
           # Two new IDs (finalid1 & finalid2) are generated with 
           # the final elements to be included in the output catalog.
           finalid1 = U.zeros((ne1-dim1rr))
           finalid2 = U.zeros((ne2-dim2rr))
           kk1 = 0
           for hh1 in range(ne1):
               if id1[hh1] not in idr1b:
                  finalid1[kk1] = id1[hh1]
                  kk1 += 1
                  
           print 'kk1',kk1
           
           kk2 = 0       
           for hh2 in range(ne2):
               if id2[hh2] not in idr2b:
                  if kk2 <= (ne2-dim2rr-1): 
                     finalid2[kk2] = id2[hh2]
                     kk2+=1
                  
           print 'kk2',kk2       
                  
           # A new smaller catalog will be created containing specz info as an extra column.
           outcat1 = ap.decapfile(cat1)+'.wo2detect.cat'
           outcat2 = ap.decapfile(cat2)+'.wo2detect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.select_rows_bylist(cat1,finalid1,outcat1)
           ap.select_rows_bylist(cat2,finalid2,outcat2)
           
           
    else:
       print 'No common sources in between the catalogs'
Example #22
coordinates = root_to_cats + 'coo/'
if not os.path.exists(coordinates):
    cmd = '/bin/mkdir %s' % (coordinates)
    os.system(cmd)
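# Note: shelling out to /bin/mkdir works, but the standard library offers a
# simpler equivalent (a minimal sketch):
#
#   if not os.path.exists(coordinates):
#       os.makedirs(coordinates)  # creates intermediate directories as needed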

# Creating (if necessary) new files containing IDs,RA,DEC from each S-PLUS/S82 tile.
for ii in range(n_cats):
    field = os.path.basename(cats[ii])[9:-15]
    new_radec_filename = coordinates + 'STRIPE82_%s_radec.coo' % (field)
    if not os.path.exists(new_radec_filename):
        new_file = open(new_radec_filename, 'w')
        print 'Reading file %i out of %i' % (ii + 1, n_cats)
        ids, ra, dec, x, y = U.get_data(cats[ii], (0, 1, 2, 3, 4))
        good_xy = N.greater_equal(x, 2500) * N.less_equal(x, 7000)
        good_xy *= N.greater_equal(y, 2500) * N.less_equal(y, 7000)
        ids, ra, dec, x, y = U.multicompress(good_xy, (ids, ra, dec, x, y))
        n_gals = len(ids)
        for ss in range(n_gals):
            #linea = '%s  %i  %.7f  %.7f  \n'%(field,ids[ss],ra[ss],dec[ss])
            linea = '%s  %i  %.7f  %.7f  %.1f  %.1f  \n' % (
                field, ids[ss], ra[ss], dec[ss], x[ss], y[ss])
            new_file.write(linea)
        new_file.close()

# It performs a match between S-PLUS coo and SDSS coo to
for ggg in range(n_cats):
    radec_match_cat = coordinates + os.path.basename(cats[ggg][:-3] +
                                                     'radec.match2sdss.cat')
    if not os.path.exists(radec_match_cat):
        print 'Cross-matching file %i out of %i' % (ggg + 1, n_cats)
        field = os.path.basename(cats[ggg])[9:-15]
Example #23
cats_names = U.get_str(root2cats + lista_cats, 0)
n_cats = len(cats_names)

out_filename = final_root + 'countsxamplif'

if not os.path.exists(out_filename):
    # Variables where to store the information
    counts = N.zeros((2, 8), float)  # 2x8 amplifiers.

    for ii in range(n_cats):
        catalog = cats_names[ii]
        print 'Reading file: ', catalog.split('/')[-1]
        x, y, mr = U.get_data(catalog, (3, 4, 78))
        good_sample = N.greater_equal(mr, 14)
        good_sample *= N.less_equal(mr, 20)
        x, y, mr = U.multicompress(good_sample, (x, y, mr))

        #Defining edges
        min_x = min(x)
        max_x = max(x)
        min_y = min(y)
        max_y = max(y)
        dx = (max_x - min_x) / 8.
        dy = (max_y - min_y) / 2.

        #print 'dx: ',dx
        #print 'dy: ',dy

        # Starting loop
        for jj in range(2):
            for hh in range(8):
Example #24
def match_spz_sample(cluster): # TO CHECK
       
    finalcat1 = catalog2[:-3]+'CLASH.redu.cat'
    finalcat2 = catalog2[:-3]+'nada.cat'
    # if not os.path.exists(finalcat1):
    if not os.path.exists(finalcat2):
        # print 'Final catalog does not exist yet.'                           
        if os.path.exists(catalog1) and os.path.exists(catalog2):
            # It matches up detections to its Spectroscopic Sample.
            # Reading specz catalog
            print 'Reading info1 before matching...'
            speczsample = catalog1
            idsp,xsp,ysp = U.get_data(speczsample,(0,3,4))
            goodsp = U.greater_equal(xsp,1500) * U.less_equal(xsp,3500)
            goodsp *= U.greater_equal(ysp,1500) * U.less_equal(ysp,3500)
            idsp,xsp,ysp = U.multicompress(goodsp,(idsp,xsp,ysp))
            print 'New dimension for specz catalogue: ',len(xsp)
            # rasp,decsp,xsp,ysp,zsp = get_data(speczsample,(0,1,2,3,4))
            # xsp,ysp,zsp = get_data(speczsample,(1,2,7))
            ####### idsp = U.arange(len(xsp))+1 
            # idsp = arange(len(rasp))+1
            # Reading ColorPro catalog
            print 'Reading info2 before matching...'
            idcol,xcol,ycol = U.get_data(catalog2,(0,3,4))
            print 'Dimension for input catalogue before compressing: ',len(idcol)
            gsp = U.greater_equal(xcol,1500) * U.less_equal(xcol,3500)
            gsp *= U.greater_equal(ycol,1500) * U.less_equal(ycol,3500)
            idcol,xcol,ycol = U.multicompress(gsp,(idcol,xcol,ycol))
            print 'Dimension for input catalogue after compressing: ',len(idcol)
            # Using "matching_vects" to match up samples...
            print 'Matching samples....'
            pepe = CT.matching_vects(idcol,xcol,ycol,idsp,xsp,ysp,1.1)   # We use now X,Y instead RA,Dec
            # Compressing matches for ColorPro...
            print 'Compressing matches...'
            matchidcol = pepe[:,0].astype(int)
            gdet_col = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
            matchidcol = U.compress(gdet_col,(matchidcol))
            # Compressing matches for Spectroscopic...
            matchidsp = pepe[:,1].astype(int)
            gdet_spz = U.greater(matchidsp,0)   # Excluding 0's (non matched detections)
            matchidsp = U.compress(gdet_spz,(matchidsp))
            print 'len(idcol)',len(idcol)
            print 'len(idsp)',len(idsp)
            if len(matchidcol) == len(matchidsp):
                print 'Creating idredu & zsredu '
                print 'Dimension of matchidsp ',len(matchidsp)
                idredu = U.zeros(len(matchidsp))
                idspredu = U.zeros(len(matchidsp))
                for ii in range(len(matchidsp)):
                    colindex = A.id2pos(idcol,matchidcol[ii]) # Position for Index idcol
                    spzindex = A.id2pos(idsp,matchidsp[ii])   # Position for Index idsp
                    idredu[ii] = idcol[colindex]  # ID for ColorPro
                    idspredu[ii] = idsp[spzindex]    # Specz for Specz
                    
                # A new smaller catalog will be created containing specz info as an extra column.
                print 'Selecting by rows... ' 
                finalcat1 = catalog2[:-3]+'UDF.redu.cat'
                finalcat2 = catalog2[:-3]+'CLASH.redu.cat'
                U.put_data(catalog2[:-3]+'idsfrommatch.txt',(idredu,idspredu))
                A.select_rows_bylist_sorted(catalog1,idspredu,finalcat1)
                A.select_rows_bylist_sorted(catalog2,idredu,finalcat2)               
Example #25
        ra_p, dec_p = U.get_data(cats_names[ggg], (1, 2))
        x_p, y_p = U.get_data(cats_names[ggg], (3, 4))
        ra_s, dec_s, z_s, r_s = U.get_data(sdss_s82_spz_cat, (0, 1, 2, 3))

        # Let's remove edges.
        xmin = np.min(x_p) + 300
        xmax = np.max(x_p) - 300
        ymin = np.min(y_p) + 300
        ymax = np.max(y_p) - 300
        good_photo = np.greater_equal(x_p, xmin)
        good_photo *= np.less_equal(x_p, xmax)
        good_photo *= np.greater_equal(y_p, ymin)
        good_photo *= np.less_equal(y_p, ymax)
        # Compress the photometric sample.
        ra_p, dec_p = U.multicompress(good_photo, (ra_p, dec_p))

        # Common area
        area_ra = np.greater_equal(ra_s, min(ra_p))
        area_ra *= np.less_equal(ra_s, max(ra_p))
        area_dec = np.greater_equal(dec_s, min(dec_p))
        area_dec *= np.less_equal(dec_s, max(dec_p))
        good_area = area_ra * area_dec
        #Compressing
        ra_s, dec_s, z_s, r_s = U.multicompress(good_area,
                                                (ra_s, dec_s, z_s, r_s))
        # Saving
        redu_spec = cats_names[ggg][:-3] + 'spez.areacommon.cat'
        U.put_data(redu_spec, (ra_s, dec_s, z_s, r_s), '# ra dec zs mr')

        # Here we find the missing galaxies
Example #26
plt.clf()

bin_labels = [0, 6]
base_x = N.linspace(-3.0, 3.0, 300)

for ss in range(12):
    plt.subplot(2, 6, ss + 1)
    if ss == 0:
        g_1 = U.greater_equal(mag1, mmin) * U.less_equal(mag1, mmax + 3)
    else:
        g_1 = U.greater_equal(mag1, mmin) * U.less_equal(mag1, mmax)
    dm2_1 = B.flux2mag(fob1[ss][g_1]) - B.flux2mag(ft1[ss][g_1])
    emob = B.e_frac2mag(efob1[ss][g_1]) / fob1[ss][g_1]
    sense_values_1 = N.less(abs(dm2_1), 5.)
    sense_values_1 *= N.less(abs(emob), 1.)
    dm2_1, emob2 = U.multicompress(sense_values_1, (dm2_1, emob))
    #dm2_1 = N.compress(sense_values_1,dm2_1)

    valor = base_x * 0.
    for ii in range(len(emob2)):
        temporal = A.gaussian(base_x, emob2[ii], 0.00, 1)
        valor += temporal * temporal
    valor = N.sqrt(valor)

    if ss in [0, 1, 2, 3, 4]:
        basem = basem_low
        #basem2 = basem2_low
    elif ss in [5, 6]:
        basem = basem_med
    else:
        basem = basem_high
Example #27
for ss in range(n_total):
    linea = '%i  %i  %.2f  %i  %.2f  %.2f  \n' % (
        x_global[ss], y_global[ss], s2n_global[ss], Flag_global[ss],
        fw_global[ss], mr_global[ss])
    file_out.write(linea)
file_out.close()

#########

#psfcat = final_path+'psfnew.cat'
psfcat = final_path + 'psfnew_dividedbyseeing.cat'
x, y, s2n, Flag, fw, mr = U.get_data(psfcat, (0, 1, 2, 3, 4, 5))
good = N.greater(s2n, 100) * N.greater(mr, 13.5) * N.less(mr, 18.5) * N.less(
    Flag, 1)
#good *= N.greater(fw,0.92)*N.less(fw,1.05)
x, y, s2n, Flag, fw, mr = U.multicompress(good, (x, y, s2n, Flag, fw, mr))
rad = N.sqrt((x - N.mean(x)) * (x - N.mean(x)) + (y - N.mean(y)) *
             (y - N.mean(y)))
rad2 = rad * 0.55 / 60.
plt.figure(1)
mat, v1, v2 = R.CC_numberdensity_contour_rangefixed(fw, rad2, 0.1, 0, 1, 0)
base = N.arange(0.8, 1.2, 0.1)

plt.figure(2, figsize=(17, 10), dpi=70, facecolor='w', edgecolor='k')
plt.clf()

dos = plt.axes([0.35, 0.215, 0.2, 0.7])
a1, a2, a3 = plt.hist(fw2,
                      base,
                      facecolor='blue',
                      alpha=0.15,
Example #28
m_max = 19.
delta_m = 0.2
base_m = N.arange(m_min, m_max + delta_m, delta_m)
base_m2 = base_m[:-1] + ((base_m[1] - base_m[0]) / 2.)
z_min = 0.005
z_max = 0.4
delta_z = 0.01
base_z = N.arange(z_min, z_max + delta_z, delta_z)
base_z2 = base_z[:-1] + ((base_z[1] - base_z[0]) / 2.)

#All bands
ruta = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/splus_cats_NGSL/'
b0 = ruta + 'COSMOSeB11new_recal/master.STRIPE82_Photometry.m21_COSMOSeB11new_recal_redu.bpz'
zb0, zs0, m0, chi0, od0, tb0 = U.get_data(b0, (1, 9, 10, 8, 5, 4))
good0 = N.greater_equal(od0, 0.1) * N.less(chi0, 10)
zb0, zs0, m0, tb0 = U.multicompress(good0, (zb0, zs0, m0, tb0))
dz0 = (zb0 - zs0) / (1. + zs0)
valor0 = N.zeros(len(base_m) - 1)
for ii in range(len(valor0)):
    good = N.greater_equal(m0, base_m[ii])
    good *= N.less_equal(m0, base_m[ii + 1])
    valor0[ii] = U.std_mad(dz0[good])
#valor0[2]=0.0039
valor0[1] = 0.0052

#5-bands
b1 = ruta + 'using_5sdss_bands/master.STRIPE82_Photometry.m21_COSMOSeB11new_recal_5bands.bpz'
zb1, zs1, m1, chi1, od1, tb1 = U.get_data(b1, (1, 9, 10, 8, 5, 4))
good1 = N.greater_equal(od1, 0.1) * N.less(chi1, 10)
zb1, zs1, m1, tb1 = U.multicompress(good1, (zb1, zs1, m1, tb1))
dz1 = (zb1 - zs1) / (1. + zs1)
filter_y2 = 'g_SDSS'

#Models
sed_models = U.get_str(sed_path+sed_lib,0)
n_models = len(sed_models)

# SDSS/S82 Contours
sdss_s82_path = '/Users/albertomolino/Postdoc/T80S_Pipeline/targets/SDSS_S82/'
sdss_s82_cat = sdss_s82_path+'catalogues/stripe82_spz_extcorr.cat'

u_mag,g_mag,r_mag,i_mag,z_mag,z_spec = U.get_data(sdss_s82_cat,(4,6,8,10,12,3))
good_sample = N.less(abs(u_mag-g_mag),5.)
good_sample*= N.less(abs(g_mag-z_mag),5.)
good_sample*= N.greater_equal(z_spec,z_min)
good_sample*= N.less_equal(z_spec,z_max)
u_mag,g_mag,r_mag  = U.multicompress(good_sample,(u_mag,g_mag,r_mag))
i_mag,z_mag,z_spec = U.multicompress(good_sample,(i_mag,z_mag,z_spec))

#Reduce sample size!
sdss_res = 3
u_mag,g_mag,r_mag = u_mag[::sdss_res],g_mag[::sdss_res],r_mag[::sdss_res]
i_mag,z_mag,z_spec = i_mag[::sdss_res],z_mag[::sdss_res],z_spec[::sdss_res]

#Reading contours
path_contour_map = sdss_s82_path+'catalogues/contours/'
x_cmap,y_cmap = U.get_data(path_contour_map+'S82cont2.dat',(0,1))
z_cmap = U.get_2Darray(path_contour_map+'S82cont.dat')


# Estimating dimensionality
ab_file_example = ab_path+sed_models[0][:-3]+filter_x1+'.AB'
Example #30
    'green',
    'red',
    'purple',
]

zb_all, odds_all, mo_all, chi2_all, spty_all = U.get_data(
    master_bpz_auto, (1, 5, 9, 8, 4))
zb_spz, zs, odds_spz, mo_spz, chi2_spz, spty_spz = U.get_data(
    master_spz_auto, (1, 9, 5, 10, 8, 4))

good_sptype_all = N.greater_equal(spty_all, t_min) * N.less_equal(
    spty_all, t_max)
good_sptype_spz = N.greater_equal(spty_spz, t_min) * N.less_equal(
    spty_spz, t_max)

zb_all, odds_all, mo_all, chi2_all = U.multicompress(
    good_sptype_all, (zb_all, odds_all, mo_all, chi2_all))
zb_spz, zs, odds_spz, mo_spz, chi2_spz = U.multicompress(
    good_sptype_spz, (zb_spz, zs, odds_spz, mo_spz, chi2_spz))

valor_auto_m_spz = N.zeros((len(base_o), (len(base_m) - 1)), float)
valor_auto_m_all = N.zeros((len(base_o), (len(base_m) - 1)), float)
valor_auto_z_spz = N.zeros((len(base_o), (len(base_z) - 1)), float)
#valor_auto_z_all = N.zeros((len(base_o),(len(base_z)-1)),float)

plt.figure(1, figsize=(9, 8), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
plt.subplot(121)
# As a function of AB
for jj in range(len(base_o)):
    for ii in range(len(base_m) - 1):
        #all