Example #1
def replacing_nans_catalogs(catalog, newname):
    """

    vars = []
    evars = []
    data = C.loaddata(catalog)
    mags = data[:,vars]
    emags = data[:,evars]

    """

    vars = N.array([
        15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66,
        69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 102, 105, 108, 111
    ])
    evars = vars[:] + 1
    s2n_vars = vars[:] + 2

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = data[:, vars]
    em = data[:, evars]
    s2n = data[:, s2n_vars]

    nl = len(mm[:, 0])  # nl is the number of detections inside every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    newmag = U.zeros((nl, nf), float)   # Where the new magnitudes will be saved.
    errmag = U.zeros((nl, nf), float)   # Where the new photometric errors will be saved.
    new_s2n = U.zeros((nl, nf), float)  # Where the new S/N values will be saved.

    for jj in range(len(vars)):
        for ii in range(nl):
            if abs(mm[ii, jj]) > 60.:
                # Failed / non-detected magnitude: store sentinel values.
                newmag[ii, jj] = -99.0
                errmag[ii, jj] = 0.00
                new_s2n[ii, jj] = -1
            elif s2n[ii, jj] < 0.00001:
                # Negligible signal-to-noise: keep the photometry but clip the S/N to zero.
                newmag[ii, jj] = mm[ii, jj]
                errmag[ii, jj] = em[ii, jj]
                new_s2n[ii, jj] = 0.
            else:
                newmag[ii, jj] = mm[ii, jj]
                errmag[ii, jj] = em[ii, jj]
                new_s2n[ii, jj] = s2n[ii, jj]

    # The new magnitudes, errors and S/N values now overwrite the original data.
    data[:, vars] = newmag[:, U.arange(nf)]
    data[:, evars] = errmag[:, U.arange(nf)]
    data[:, s2n_vars] = new_s2n[:, U.arange(nf)]
    C.savedata(data, newname, dir="", header=head)  # Saving & creating the new catalog.
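The per-element double loop above can also be expressed with vectorised numpy masks. A minimal sketch, assuming plain numpy arrays stand in for the catalogue columns (the toy values are illustrative only); like the loop, it keeps the measured photometry when only the S/N is clipped:

# --- hedged sketch, not part of the original function ---
import numpy as np

mm = np.array([[23.1, 99.0], [24.5, -99.0]])   # stand-in for data[:, vars]
em = np.array([[0.10, 0.50], [0.20, 0.90]])    # stand-in for data[:, evars]
s2n = np.array([[12.0, 3.0], [8.0, 0.0]])      # stand-in for data[:, s2n_vars]

bad = np.abs(mm) > 60.                         # failed / non-detected magnitudes
newmag = np.where(bad, -99.0, mm)              # sentinel magnitude
errmag = np.where(bad, 0.0, em)                # zero error for sentinels
new_s2n = np.where(bad, -1.0, np.where(s2n < 1e-5, 0.0, s2n))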
Example #2
def replace_upplimits(catalog, columns, finalcatalog):
    """
    Replaces the photometric error of every non-detected source (m = 99) with
    the 1-sigma limiting magnitude of the corresponding filter and saves the
    result as a new catalogue, 'finalcatalog'.
    """
    data = C.loaddata(catalog)      # Loading the whole catalog content.
    head = C.loadheader(catalog)    # Loading the original header.
    m = get_magnitudes(catalog,columns)
    em = get_errmagnitudes(catalog,columns)
    filters = bpt.get_filter_list(columns)
    
    nl = len(m[:,0])    # nl is the number of detections inside every single band.
    nf = len(m[0,:])    # nf is the number of bands inside the catalog. 
    errmag = U.zeros((nl,nf),float)  # Where the new photo errors will be saved. 

    for jj in range(nf):
        # maglim = get_limitingmagnitude(m[:,jj],em[:,jj],3.,0.25)
        maglim = bpt.get_limitingmagnitude(m[:,jj],em[:,jj],1.,0.25)
        print 'Limiting Magnitude for filter %s: %.3f'%(filters[jj],maglim)
        for ii in range(nl):
            # print 'm[%i,%i]'%(ii,jj),m[ii,jj]
            if m[ii,jj] != 99. :         
               errmag[ii,jj] = em[ii,jj]    
            else:
               # print 'UNDETECTED OBJECT. SAVING ITS LIMITING MAGNITUDE !!'
               errmag[ii,jj] = maglim
    
    
    # The new magnitude errors now overwrite the original data.
    vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)
    data[:,evars] = errmag[:,U.arange(nf)]
    C.savedata(data,finalcatalog, dir="",header=head) # Saving & creating the new catalog.
Example #3
def arrange_alhambraHDF5list_byfield(lista, save='yes'):
    """
    It creates a global P(z)'s
---------------------------------
import alhambrahdf5
from alhambrahdf5 import *
mat = arrange_alhambraHDF5list_byfield(lista)

    """

    ims = U.get_str(lista, 0)
    basez = U.arange(0.001, 7.001, 0.001)
    dim = len(ims)
    for ii in range(dim):
        print '%i/%i' % (ii + 1, dim)
        infile = ims[ii]
        data = U.get_data(infile, 0)
        if ii < 1: datos = data
        else: datos += data

    if save == 'yes':
        finaldata = decapfile(lista) + '.global.mat'
        U.put_data(finaldata, (datos, basez))

    return datos
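The accumulation loop above is a plain element-wise co-addition of the per-field P(z) columns. A minimal numpy sketch of the same idea, using in-memory random stand-ins instead of U.get_data calls; the final normalisation over the redshift grid is an optional extra not performed by the function itself:

# --- hedged sketch, not part of the original function ---
import numpy as np

basez = np.arange(0.001, 7.001, 0.001)
pdz_per_field = [np.random.rand(len(basez)) for _ in range(4)]  # stand-ins for each file's P(z) column
datos = np.sum(pdz_per_field, axis=0)                           # element-wise co-addition, as in the loop
datos /= np.trapz(datos, basez)                                 # optional: normalise over the redshift grid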
Example #4
def slicing_hdf5alhambra(infile, bpz):
    """
    This routine generates subsample of P(z,T) 
    applying a magnitude-based criteria.
    ------------------------------------------ 

import alhambrahdf5
from alhambrahdf5 import *
slicing_hdf5alhambra(infile,bpz)    
    
    """

    ruta = '/Volumes/amb22/catalogos/reduction_v4f/globalPDZ/'
    basem = U.arange(19., 25.5, 0.5)
    ns = len(basem)
    mo, stflag = U.get_data(bpz, (64, 71))
    for ii in range(ns - 1):
        cond = U.greater_equal(mo, basem[ii]) * U.less_equal(mo, basem[ii + 1])
        cond *= U.less_equal(stflag, 0.8)
        infile2 = infile.split('/')[-1:][0]
        finalname = ruta + infile2 + '.%.1fm%.1f.mat' % (basem[ii],
                                                         basem[ii + 1])
        if not os.path.exists(finalname):
            print 'generating new file %s' % (finalname)
            mat = AH.alhambra_get2Dmatrix_HDF5(infile, cond, finalname, 1)
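The selection inside the loop combines a magnitude bin with a stellarity-flag cut. A small self-contained sketch of that masking step with boolean numpy arrays; the columns are random stand-ins for the BPZ quantities read above:

# --- hedged sketch, not part of the original function ---
import numpy as np

mo = np.random.uniform(18., 26., 1000)     # stand-in for the reference magnitudes (column 64)
stflag = np.random.uniform(0., 1., 1000)   # stand-in for the stellarity flag (column 71)
basem = np.arange(19., 25.5, 0.5)
for lo, hi in zip(basem[:-1], basem[1:]):
    cond = (mo >= lo) & (mo <= hi) & (stflag <= 0.8)
    print('%.1f < m < %.1f : %i sources selected' % (lo, hi, cond.sum()))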
Example #5
def replace_photo_uncert(catalog, columns):
    """

    """
    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections inside every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        maglim = B.get_limitingmagnitude(mm[:, jj], em[:, jj], 1., 0.25)
        print 'Limiting Magnitude for filter %s: %.3f' % (filters[jj], maglim)
        for ii in range(nl):
            if mm[ii, jj] == -99.:
                errmag[ii, jj] = 0.00
            elif mm[ii, jj] == 99.:
                errmag[ii, jj] = maglim
            else:
                errmag[ii, jj] = em[ii, jj]

    # The new magnitude errors now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    finalcatalog = catalog[:-3] + 'upp.cat'
    C.savedata(data, finalcatalog, dir="", header=head)  # Saving & creating the new catalog.
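The three-way rule used above (em = 0 for m = -99, the limiting magnitude for m = 99, the measured error otherwise) can also be written as a single vectorised expression. A sketch with illustrative values only:

# --- hedged sketch, not part of the original function ---
import numpy as np

mm = np.array([22.8, 99.0, -99.0])   # detected, non-detected, non-observed
em = np.array([0.12, 0.90, 0.00])
maglim = 24.7                        # illustrative limiting magnitude for this band
errmag = np.select([mm == -99., mm == 99.], [0.00, maglim], default=em)
# errmag -> array([ 0.12, 24.7, 0. ])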
Example #6
def replace_kerttu_errmags(catalog, columns, finalcatalog):
    """
    Sets em = 0 for non-observed sources (m = -99) while keeping every other
    photometric error unchanged, and saves the result as 'finalcatalog'.
------
import alhambra_kerttu_fixerrmags as AFM
catalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.cat'
columns = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.columns'
finalcatalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu3.cat'
AFM.replace_kerttu_errmags(catalog,columns,finalcatalog)
------

    """

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections inside every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        for ii in range(nl):
            if mm[ii, jj] == -99.: errmag[ii, jj] = 0.00
            else: errmag[ii, jj] = em[ii, jj]

    # The new magnitude errors now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    C.savedata(data, finalcatalog, dir="", header=head)  # Saving & creating the new catalog.
Example #7
def pepe(num, pdz):
    basez = U.arange(0.001, 7.001, 0.001)
    b = basez * 0.
    for ss in range(81):
        a = pdz[num, :, ss]
        if a.sum() > 1.0e-30:
            b += a / sum(a)
    plt.plot(basez, b / b.sum(), '-', lw=2)
    plt.xlim(0., 3.)
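pepe normalises each template slice of the object's P(z,T) and then sums over templates to get P(z). The same marginalisation can be done without the explicit template loop; a minimal sketch with a random stand-in matrix (empty template columns are handled the same way as in the loop, by leaving their negligible contribution untouched):

# --- hedged sketch, not part of the original function ---
import numpy as np

basez = np.arange(0.001, 7.001, 0.001)
pzt = np.random.rand(len(basez), 81)       # stand-in for pdz[num, :, :]
cols = pzt.sum(axis=0)
norm = np.where(cols > 1.0e-30, cols, 1.)  # avoid dividing empty template columns
pz = (pzt / norm).sum(axis=1)              # normalise per template, then marginalise over T
pz /= pz.sum()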
Example #8
def script_arrange_alhambraHDF5list_bymags(field):
    """

import alhambrahdf5
from alhambrahdf5 import *
field=2field)

    """

    base = U.arange(19., 25.5, 0.5)
    dim = len(base) - 1
    root = '/Volumes/amb22/catalogos/reduction_v4f/globalPDZ/'
    for ii in range(dim):
        lista = '%salhambra.%.1fm%.1f.pdz.list' % (root, base[ii],
                                                   base[ii + 1])
        if os.path.exists(lista):
            print 'Reading %s...' % (lista)
            paco = arrange_alhambraHDF5list_byfield(lista)
        else:
            print '%s does not exist!' % (lista)
Example #9
def get_alhambraHDF5list_bymags(field):
    """
import alhambrahdf5
from alhambrahdf5 import *
field=2
get_alhambraHDF5list_bymags(field)
    
    """

    root = '/Volumes/amb22/catalogos/reduction_v4f/globalPDZ/'
    file2 = open(root + 'alhambra.cmds.txt', 'w')
    base = U.arange(19., 25.5, 0.5)
    dim = len(base) - 1
    for ii in range(dim):
        linea = ''
        linea = 'ls %sF0*/f0*%.1fm%.1f.global.mat > %salhambra.%.1fm%.1f.pdz.list' % (
            root, base[ii], base[ii + 1], root, base[ii], base[ii + 1])
        linea += '\n'
        file2.write(linea)

    file2.close()
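The file written above only stores shell 'ls' commands, one per magnitude bin. The same file lists can be gathered directly in Python with glob; a hedged sketch whose pattern mirrors the first generated command (the path only exists on the original machine, so the list is empty elsewhere):

# --- hedged sketch, not part of the original function ---
import glob

root = '/Volumes/amb22/catalogos/reduction_v4f/globalPDZ/'
pattern = root + 'F0*/f0*19.0m19.5.global.mat'   # equivalent of the first 'ls' command
files = sorted(glob.glob(pattern))               # empty list if the volume is not mounted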
Example #10
def get_alhambraHDF5list_byfield(field):
    """
import alhambrahdf5
from alhambrahdf5 import *
field=7
get_alhambraHDF5list_byfield(field)
    
    """

    root = '/Volumes/amb22/catalogos/reduction_v4f/globalPDZ/F0%i/' % (field)
    file2 = open(root + 'F0%i.cmds.txt' % (field), 'w')
    base = U.arange(19., 25.5, 0.5)
    dim = len(base) - 1
    for ii in range(dim):
        linea = ''
        linea = 'ls %sf0%ip0*hdf5*%.1fm%.1f.mat > %sf0%ihdf5.%.1fm%.1f.list' % (
            root, field, base[ii], base[ii + 1], root, field, base[ii],
            base[ii + 1])
        linea += '\n'
        file2.write(linea)

    file2.close()
Example #11
def appending_ids2catalogues(field, pointing, ccd):
    """

import alhambra_3arcs as A3
A3.appending_ids2catalogues(2,1,1)


    """

    catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, field, pointing, ccd)
    idalh = U.get_str(catalhambra, 0)
    idalh2 = U.arange(len(idalh)) + 1
    xalh, yalh = U.get_data(catalhambra, (6, 7))

    cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % (
        field, field, pointing, ccd)
    id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4))
    print len(id3arcs)

    matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
    if not os.path.exists(matchfile):
        idcol = idalh2
        xcol = xalh
        ycol = yalh
        idsp = id3arcs
        xsp = x3arcs
        ysp = y3arcs

        pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5)

        # Compressing matches for ColorPro...
        print 'Compressing matches...'
        matchidcol = pepe[:, 0].astype(int)
        gdet_col = U.greater(matchidcol, 0)  # Excluding 0's (non-matched detections).
        matchidcol = U.compress(gdet_col, (matchidcol))
        # Compressing matches for Spectroscopic...
        matchidsp = pepe[:, 1].astype(int)
        gdet_spz = U.greater(matchidsp, 0)  # Excluding 0's (non-matched detections).
        matchidsp = U.compress(gdet_spz, (matchidsp))
        print 'len(idcol)', len(idcol)
        print 'len(idsp)', len(idsp)
        if len(matchidcol) == len(matchidsp):
            print 'Creating idredu & zsredu '
            print 'Dimension of matchidsp ', len(matchidsp)
            idredu = U.zeros(len(matchidsp))
            idspredu = U.zeros(len(matchidsp))
            for ii in range(len(matchidsp)):
                colindex = A.id2pos(idcol, matchidcol[ii])  # Position of this match in idcol.
                spzindex = A.id2pos(idsp, matchidsp[ii])    # Position of this match in idsp.
                idredu[ii] = idcol[colindex]   # ID in the ColorPro catalogue.
                idspredu[ii] = idsp[spzindex]  # ID in the 3-arcsec catalogue.

            matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
            U.put_data(matchfile, (idredu, idspredu))

    if os.path.exists(matchfile):
        pepa = open(matchfile[:-3] + 'bis.cat', 'w')
        idredu, idspredu = U.get_data(matchfile, (0, 1))
        i11 = idredu.astype(int) - 1
        i22 = idspredu.astype(int)
        lista = []
        for ii in range(len(i11)):
            lista.append(idalh[i11[ii]])
            pepa.write('%s  %s  \n' % (idalh[i11[ii]], i22[ii]))
        pepa.close()

        finalfinal = cat3arcs[:-3] + 'final.cat'
        if os.path.exists(finalfinal): A.deletefile(finalfinal)
        if not os.path.exists(finalfinal):
            print 'Preparing ', finalfinal
            idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0)
            append_IDs2_3arcs_catalogues(cat3arcs, idsa)
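The key step above is the positional cross-match performed by CT.matching_vects with a 5-pixel tolerance. A rough, self-contained sketch of the same idea using scipy's cKDTree, with synthetic coordinates; matching_vects itself may use a different algorithm, so this is only an illustration of the matching criterion:

# --- hedged sketch, not part of the original function ---
import numpy as np
from scipy.spatial import cKDTree

xy_col = np.random.rand(200, 2) * 4000.                 # stand-in for (xalh, yalh)
xy_3as = xy_col[::10] + np.random.randn(20, 2) * 0.5    # shifted subset standing in for (x3arcs, y3arcs)
dist, idx = cKDTree(xy_col).query(xy_3as)               # nearest ColorPro source for each 3-arcs source
good = dist < 5.0                                       # same 5-pixel matching radius
matched_pairs = np.column_stack((idx[good], np.where(good)[0]))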
Example #12
def figura33(lista):
    """

    I'm using the stellar classification from the version_e.
    ----
import alhambra_completeness as alhc
lista = '/Volumes/amb22/catalogos/reduction_v4d/globalcats/lista.list'
alhc.figura33(lista)

    """
    blue = 0
    red = 1
    cats = U.get_str(lista, 0)
    cats2 = U.get_str(lista, 1)
    nc = len(cats)
    dx = 0.2
    dy = 0.4
    nxbins = 4
    nybins = 2
    ods = 0.05
    mmin = 16.0
    mmax = 23.75
    zbmin = 0.0001
    zbmax = 1.4
    Mmin = -24
    Mmax = -17
    if red:
        Tbmin = 1  # 7.
        Tbmax = 5  # 11.
        resolmag = 0.2  # 0.2
        resolz = 0.05
    if blue:
        Tbmin = 7.
        Tbmax = 11.
        resolmag = 0.2
        resolz = 0.05

    resol = 0.025
    areas = ([0.45, 0.47, 0.23, 0.24, 0.47, 0.47, 0.46, 2.79])

    plt.figure(111, figsize=(21.5, 11.5), dpi=70, facecolor='w', edgecolor='k')
    ss = 0
    for jj in range(nybins):
        for ii in range(nxbins):
            # Reading data from catalogs.
            mo, zb, tb, odds, m814 = U.get_data(cats[ss], (81, 72, 75, 76, 62))
            sf = U.get_data(cats2[ss], 71)
            # mo,zb,tb,sf,odds,m814 = U.get_data(cats[ss],(81,72,75,71,76,62))
            g = U.greater_equal(abs(m814), mmin) * U.less_equal(
                abs(m814), mmax)
            # g* = U.greater_equal(odds,ods)
            g *= U.greater_equal(tb, Tbmin) * U.less_equal(tb, Tbmax)
            g *= U.less_equal(sf, 0.8)
            yy = -0.014 * m814 + 0.38
            g *= U.greater(odds, yy)
            g *= U.less_equal(mo, Mmax + resol) * U.greater(mo, Mmin - resol)
            g *= U.greater(zb, zbmin) * U.less_equal(zb, zbmax)
            mo, zb, tb, odds = U.multicompress(g, (mo, zb, tb, odds))
            print 'dimension', len(mo)
            # Plotting density.
            # cuadrado = plt.axes([.1+(ii*dx),.1+((nybins-jj-1)*dy),dx,dy])
            if ii == nxbins - 1:
                cuadrado = plt.axes([
                    .1 + (ii * dx), .1 + ((nybins - jj - 1) * dy),
                    dx + (dx * 0.2), dy
                ])
            else:
                cuadrado = plt.axes(
                    [.1 + (ii * dx), .1 + ((nybins - jj - 1) * dy), dx, dy])
            matrix, axis2, axis1 = rs.CC_numberdensity_contour_zvolume(
                zb, mo, resolz, resolmag, 1)
            if blue:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-11.,
                             vmax=-7.)  # blue galaxies
            if red:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-12.,
                             vmax=-7.65)  # red galaxies

            if ii == nxbins - 1:
                aa = plt.colorbar(pad=0., format='%.1f')
                aa.set_label('Log. Density [N/Mpc$^{3}$/deg$^{2}$]', size=18)
            if jj != nybins - 1: plt.setp(cuadrado, xticks=[])
            if ii != 0: plt.setp(cuadrado, yticks=[])
            if jj == nybins - 1:
                plt.xlabel('M$_{B}$', size=27)
                plt.xticks(fontsize=17)
            if ii == 0:
                plt.ylabel('redshift', size=28)
                plt.yticks(fontsize=17)

            # plotting axis manually
            base1 = U.arange(Mmin, Mmax + 1., 1.)
            base2 = U.arange(0, zbmax + (2. * resol), resol)
            dim1 = len(base1)
            dim2 = len(base2)
            for rr in range(dim1):
                plt.plot(base2 * 0. + base1[rr],
                         base2,
                         'k--',
                         linewidth=1.,
                         alpha=0.25)
            for rr in range(dim2):
                plt.plot(base1,
                         base1 * 0. + base2[rr],
                         'k--',
                         linewidth=1.,
                         alpha=0.25)

            # plt.grid()
            plt.ylim(zbmin + 0.0001, zbmax - 0.001)
            plt.xlim(Mmin + 0.0001, Mmax - 0.0001)
            if ss == 7: labelleg = 'Global'
            else: labelleg = 'A%i' % (ss + 2)
            xypos = (Mmax - 1.6, zbmax - 0.18)
            if ss == 7: xypos = (Mmax - 3.5, zbmax - 0.18)
            plt.annotate(labelleg, xy=xypos, fontsize=40, color='black')
            ss += 1

    plt.savefig('completeness.alhambra.png', dpi=200)
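The sample selection in figura33 combines a magnitude window, a template range, a stellarity cut and a magnitude-dependent Odds threshold (odds > -0.014*m814 + 0.38). A compact sketch of that composite mask with synthetic columns standing in for the catalogue quantities:

# --- hedged sketch, not part of the original function ---
import numpy as np

m814 = np.random.uniform(16., 24., 500)
tb = np.random.uniform(1., 11., 500)
sf = np.random.rand(500)
odds = np.random.rand(500)

sel = (np.abs(m814) >= 16.0) & (np.abs(m814) <= 23.75)
sel &= (tb >= 1) & (tb <= 5)                 # 'red' template range
sel &= sf <= 0.8                             # stellarity cut
sel &= odds > (-0.014 * m814 + 0.38)         # magnitude-dependent Odds cut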
Example #13
import h5py
import tables
import numpy as U  # assumption: 'U' elsewhere is BPZ's 'useful' module; numpy covers arange/sum here
import matplotlib.pyplot as plt

idpos = 0
hdf5file = '/Users/albertomolino/Desktop/CLASH/SN_Colfax/colfaxHost.hdf5'
p = h5py.File(hdf5file, mode='r')
pdf = p.get('Likelihood')
z = p.get('redshift')
zz = z[:]
t = p.get('type')
tt = t[:]
deltazz = zz[1] - zz[0]
deltatt = tt[1] - tt[0]
# deltazz2 = deltazz[0]/2.
# deltatt2 = deltatt[0]/2.
basez = U.arange(zz.min(), zz.max() + deltazz, deltazz)
baset = U.arange(tt.min(), tt.max() + deltatt, deltatt)
matris = pdf[idpos, :, :]
temps = U.sum(pdf[idpos, :, :], axis=0)
reds = U.sum(pdf[idpos, :, :], axis=1)

plt.figure(15, figsize=(12., 9.5), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
plt.ion()
plt.show()
nullfmt = plt.NullFormatter()  # no labels
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
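A hedged variant of the HDF5 reads above using a context manager, so the file is closed automatically once the slices are extracted; it reuses 'hdf5file' and 'idpos' defined earlier and the same dataset names, and the axis interpretation follows the 'temps'/'reds' naming of the script:

# --- hedged sketch, not part of the original script ---
import h5py
import numpy as np

with h5py.File(hdf5file, mode='r') as f:
    matris = f['Likelihood'][idpos, :, :]
    zz = f['redshift'][:]
    tt = f['type'][:]
temps = np.sum(matris, axis=0)   # marginalised over redshift
reds = np.sum(matris, axis=1)    # marginalised over templates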
Example #14
def check_variable_candidate(alhambraid):
    """
    It replaces failed magnitudes in the ALHAMBRA catalogues (artificial absorptions,
    non-observed sources assigned as non-detected with upper limits) by m=-99,em=99
    It might decrease the amount of low Odds at bright magnitudes.
----
import alhambra_photools as A
A.replacing_fakeabsorptions(2,1,2)

A.check_sample(image,catalog,posID,posX,posY,posMAG)
A.alhambra_id_finder(ra1,dec1)
    idd = int(id[pos])

A.alhambra_colorstamp_byID(id)

f,p,c,ids = A.alhambra_id_finder(37.4992,1.2482)
        
    """

    field = int(str(alhambraid)[3])
    pointing = int(str(alhambraid)[4])
    ccd = int(str(alhambraid)[5])

    numero = str(alhambraid)[-5:]
    print numero
    for ii in range(2):
        if numero[0] == '0': numero = numero[1:]
    print numero

    root2cats = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root2cats + 'originals/f0%ip0%i_colorproext_%i_ISO.cat' % (
        field, pointing, ccd)

    cols1 = root2cats + 'f0%ip0%i_%i_tot_ISO_eB10.columns' % (field, pointing,
                                                              ccd)
    cols2 = root2cats + 'f0%ip0%i_colorproext_%i_ISO_phz_eB10.columns' % (
        field, pointing, ccd)
    if os.path.exists(cols1): columns = cols1
    else: columns = cols2

    filters = B.get_filter_list(columns)
    print filters

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    m = A.get_magnitudes(catalog, columns)
    # em   = get_errmagnitudes(catalog,columns)

    root2 = '/Volumes/amb22/catalogos/reduction_v4e/'
    fluxc1 = root2 + 'f0%i/f0%ip0%i_colorproext_%i_ISO_phz_eB11.flux_comparison' % (
        field, field, pointing, ccd)
    fluxc2 = root2 + 'f0%i/f0%ip0%i_colorproext_%i_ISO.flux_comparison' % (
        field, field, pointing, ccd)
    if os.path.exists(fluxc1): fluxc = fluxc1
    else: fluxc = fluxc2
    ido, ftt, foo, efoo, zb, tb, mm = P.get_usefulfluxcomparison(
        columns, fluxc)

    pos = A.get_position(ido, int(numero))
    print ido[pos], mm[pos]

    plt.figure(1, figsize=(10, 7), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    # P.plot1sedfitting(foo[:,jj],efoo[:,jj],ftt[:,jj],zb[jj],tb[jj],root_bpz_sed+'eB11.list',filters)
    plt.plot(U.arange(20) + 1, foo[0:20, pos], 'k-', alpha=0.4, lw=6)
    plt.plot(U.arange(20) + 1, foo[0:20, pos], 'ko', alpha=0.4, ms=12)
    plt.errorbar(U.arange(20) + 1,
                 foo[0:20, pos], (efoo[0:20, pos] / 1.),
                 fmt="ko",
                 alpha=0.4,
                 ms=10)
    minf = (foo[0:20, pos].min()) * 1.1
    maxf = (foo[0:20, pos].max()) * 1.1
    maxef = (efoo[0:20, pos].max()) * 1.1
    # plt.ylim(minf-maxef,maxf+maxef)
    plt.xlim(0, 21)
    plt.xlabel('Filter', size=25)
    plt.ylabel('Flux', size=25)
    plt.legend(['Magnitude: %.2f' % (m[pos][-1])],
               loc='upper right',
               numpoints=1,
               fontsize=20)
    plt.title(alhambraid, size=25)
    plt.grid()
    plt.show()
    namefig = '/Users/albertomolino/doctorado/photo/variability/analysis/vocheck.ID%s.png' % (
        alhambraid)
    plt.savefig(namefig, dpi=125)

    outcat = '/Users/albertomolino/doctorado/photo/variability/analysis/vocheck.ID%s.cat' % (
        alhambraid)
    A.select_rows_bylist_pro(catalog, ido[pos], outcat)
    print ' '
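For reference, the ID-parsing block at the top of the function can be condensed into a few lines. A small sketch with an illustrative ID (the ID value is made up; note that lstrip removes every leading zero, whereas the loop above strips at most two):

# --- hedged sketch, not part of the original function ---
alhambraid = 81412300123                      # illustrative ALHAMBRA unique ID
s = str(alhambraid)
field, pointing, ccd = int(s[3]), int(s[4]), int(s[5])
numero = s[-5:].lstrip('0')                   # object number without leading zeros
print('%i %i %i %s' % (field, pointing, ccd, numero))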