Example #1
def appendcatalogs(catalog1, catalog2, catalogOUT):
    """
    The task appends two catalogs, keeping only catalog1's header:
    catalog1 (with header) + catalog2 (without header).
    The final (composed) catalog is saved as catalogOUT.
    Note that both catalogs must share the same column format!
    -----

    """

    print 'Reading file1: ', catalog1
    data1 = C.loaddata(catalog1)  # Loading the whole catalog1 content.
    head1 = C.loadheader(catalog1)  # Loading the original header1.
    print 'Reading file2: ', catalog2
    data2 = C.loaddata(catalog2)  # Loading the whole catalog2 content.
    head2 = C.loadheader(catalog2)  # Loading the original header2.

    outcat = catalogOUT
    print outcat

    try:
        nf1 = N.shape(data1)[0]
        nc1 = N.shape(data1)[1]
    except IndexError:  # data1 is one-dimensional (a single row).
        nf1 = 1
        nc1 = N.shape(data1)[0]

    try:
        nf2 = N.shape(data2)[0]
        nc2 = N.shape(data2)[1]
    except IndexError:  # data2 is one-dimensional (a single row).
        nf2 = 1
        nc2 = N.shape(data2)[0]

    print 'Dimensions catalogue_1: nrows: %i, ncols: %i' % (nf1, nc1)
    print 'Dimensions catalogue_2: nrows: %i, ncols: %i' % (nf2, nc2)

    if nc1 == nc2:
        nf = nf1 + nf2
        nc = nc1
        newdata = N.zeros((nf, nc), float)

        for ii in range(nf1):
            if nf1 < 2: newdata[ii, :] = data1[:]
            else: newdata[ii, :] = data1[ii, :]

        for ii in range(nf2):
            if nf2 < 2: newdata[ii + nf1, :] = data2[:]
            else: newdata[ii + nf1, :] = data2[ii, :]

        C.savedata(newdata, outcat, dir="",
                   header=head1)  # Saving and creating the new catalog.

    else:
        print 'Different number of columns between catalogs. Impossible to append them!'
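A minimal usage sketch for the routine above (the file names are hypothetical, and C is assumed to be the coeio module used throughout these examples):

# Hypothetical catalogs sharing the same column layout.
cat1 = 'f02p01_colorproext_1_ISO.cat'
cat2 = 'f02p02_colorproext_1_ISO.cat'
appendcatalogs(cat1, cat2, 'f02_combined_ISO.cat')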
Example #2
def minimum_photouncert(catalog, columns):
    """

    """
    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    nl = len(mm[:, 0])  # nl is the number of detections in every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = N.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        for ii in range(nl):
            if em[ii, jj] < 0.01: errmag[ii, jj] = 0.03
            elif em[ii, jj] > 1.0: errmag[ii, jj] = 1.0
            else: errmag[ii, jj] = em[ii, jj]

    # The new magnitude-error values now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, N.arange(nf)]
    finalcatalog = catalog[:-3] + 'ecor.cat'
    C.savedata(data, finalcatalog, dir="",
               header=head)  # Saving & creating a new catalog.
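The double loop above has a compact vectorized equivalent; a sketch, assuming em is a numpy array:

# Floor tiny errors at 0.03, cap everything above 1.0, keep the rest.
errmag = N.where(em < 0.01, 0.03, N.minimum(em, 1.0))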
Example #3
def replace_photo_uncert(catalog, columns):
    """

    """
    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections in every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        maglim = B.get_limitingmagnitude(mm[:, jj], em[:, jj], 1., 0.25)
        print 'Limiting Magnitude for filter %s: %.3f' % (filters[jj], maglim)
        for ii in range(nl):
            if mm[ii, jj] == -99.:
                errmag[ii, jj] = 0.00
            elif mm[ii, jj] == 99.:
                errmag[ii, jj] = maglim
            else:
                errmag[ii, jj] = em[ii, jj]

    # The new magnitude-error values now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    finalcatalog = catalog[:-3] + 'upp.cat'
    C.savedata(data, finalcatalog, dir="",
               header=head)  # Saving & creating a new catalog.
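The same per-band logic can be vectorized; a sketch for a single band jj, using numpy as N (an assumption, matching the other examples):

# 0.0 for non-observed (-99), the limiting magnitude for
# non-detected (99), the original error otherwise.
errmag[:, jj] = N.where(mm[:, jj] == -99., 0.,
                        N.where(mm[:, jj] == 99., maglim, em[:, jj]))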
Example #4
def replace_kerttu_errmags(catalog, columns, finalcatalog):
    """

import alhambra_kerttu_fixerrmags as AFM
catalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.cat'
columns = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.columns'
finalcatalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu3.cat'
AFM.replace_kerttu_errmag(catalog,columns,finalcatalog)
------

    """

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections in every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        for ii in range(nl):
            if mm[ii, jj] == -99.: errmag[ii, jj] = 0.00
            else: errmag[ii, jj] = em[ii, jj]

    # The new magnitude-error values now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    C.savedata(data, finalcatalog, dir="",
               header=head)  # Saving & creating a new catalog.
Example #5
def compress_bpz_catalogues(catalogue, sample, outname):
    """
    It selects a subsample of sources from
    an input catalogue.

    :param catalogue:
    :return:
    """
    head1 = C.loadheader(catalogue)
    data1 = C.loaddata(catalogue)
    C.savedata(data1[sample, :], outname, dir="", header=head1)
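A usage sketch (file names hypothetical; U and N are the useful and numpy modules used in the other examples), mirroring the magnitude cut applied in get_alhambra_GOLD below:

# Keep only sources brighter than m=23 in (hypothetical) column 65.
mo = U.get_data('alhambra.F02P01C01.ColorProBPZ.cat', 65)
bright = N.less(mo, 23.)
compress_bpz_catalogues('alhambra.F02P01C01.ColorProBPZ.cat', bright,
                        'alhambra.F02P01C01.ColorProBPZ.bright.cat')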
Example #6
def replacing_nans_catalogs(catalog, newname):
    """

    vars = []
    evars = []
    data = C.loaddata(catalog)
    mags = data[:,vars]
    emags = data[:,evars]

    """

    vars = N.array([
        15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66,
        69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 102, 105, 108, 111
    ])
    evars = vars[:] + 1
    s2n_vars = vars[:] + 2

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = data[:, vars]
    em = data[:, evars]
    s2n = data[:, s2n_vars]

    nl = len(mm[:, 0])  # nl is the number of detections in every single band.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    newmag = U.zeros((nl, nf), float)   # Where the new magnitudes will be saved.
    errmag = U.zeros((nl, nf), float)   # Where the new photometric errors will be saved.
    new_s2n = U.zeros((nl, nf), float)  # Where the new S/N values will be saved.

    for jj in range(len(vars)):
        for ii in range(nl):
            if abs(mm[ii, jj]) > 60.:
                newmag[ii, jj] = -99.0
                errmag[ii, jj] = 0.00
                new_s2n[ii, jj] = -1
            elif s2n[ii, jj] < 0.00001:
                # Keep the photometry but clip the tiny/negative S/N to zero.
                newmag[ii, jj] = mm[ii, jj]
                errmag[ii, jj] = em[ii, jj]
                new_s2n[ii, jj] = 0.
            else:
                newmag[ii, jj] = mm[ii, jj]
                errmag[ii, jj] = em[ii, jj]
                new_s2n[ii, jj] = s2n[ii, jj]

    # The new magnitude and error values now overwrite the original data.
    data[:, vars] = newmag[:, U.arange(nf)]
    data[:, evars] = errmag[:, U.arange(nf)]
    data[:, s2n_vars] = new_s2n[:, U.arange(nf)]
    C.savedata(data, newname, dir="",
               header=head)  # Saving & creating a new catalog.
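A usage sketch with hypothetical file names:

# Clean a catalogue and save it under a new name.
replacing_nans_catalogs('f02p01_colorproext_1_ISO.cat',
                        'f02p01_colorproext_1_ISO.clean.cat')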
Example #7
def run():
    cs = U.get_str(lista_fluxcomp, 0)
    nc = len(cs)
    cat = U.get_str(lista_catalogs, 0)
    ncat = len(cat)
    print 'Number of catalogues to convert: ', nc
    cols = U.get_str(lista_columns, 0)
    ncols = len(cols)
    print 'nc,ncat,ncols: ', nc, ncat, ncols

    for ii in range(nc):
        head = coeio.loadheader(cs[ii])
        nh = len(head)
        print 'Number of variables: ', nh
        # pausa = raw_input('paused')
        nf = nh - 5
        body = coeio.loaddata(cs[ii])
        ng = len(body)
        # pausa = raw_input('paused')
        field = ((cat[ii].split('/')[-1]).split('.')[1])[2]
        pointing = ((cat[ii].split('/')[-1]).split('.')[1])[5]
        ccd = ((cat[ii].split('/')[-1]).split('.')[1])[8]
        print 'Reading Field: %s, Pointing: %s, CCD: %s' % (field, pointing,
                                                            ccd)

        # pausa = raw_input('paused')
        filename = final_root + cat[ii].split('/')[-1][:-3] + 'flux_comparison'
        print 'filename', filename
        filename2 = final_root + cat[ii].split('/')[-1][:-3] + 'columns'
        A.copyfile(cols[ii], filename2)
        # pausa = raw_input('paused')
        outfile = open(filename, 'w')
        for hh in range(len(head)):
            outfile.write('%s \n' % (head[hh]))

        for ss in range(ng):
            ids = body[ss, 0]
            # print 'IDs',ids
            ids814 = convert814ids(ids, int(field), int(pointing), int(ccd))
            for jj in range(nh):
                if jj == 0: outfile.write('%s  ' % (ids814))
                if jj == 1: outfile.write('%.2f  ' % (body[ss, 1]))
                if jj == 2: outfile.write('%.2f  ' % (body[ss, 2]))
                if jj == 3: outfile.write('%.2f  ' % (body[ss, 3]))
                if jj == 4: outfile.write('%.2f  ' % (body[ss, 4]))
                if jj > 4:
                    # print body[ss,jj]
                    # pausa = raw_input('paused')
                    outfile.write('%s  ' % (body[ss, jj]))
            outfile.write('\n')
        outfile.close()
Example #8
def runZPcal_catalogue(reference, frame, final):
    """
    ----
filter_ref_cat,alig_frame_cat,alig_cal_frame_cat
    """
    plots = 1
    data2 = C.loaddata(frame)  # Loading the whole catalog2 content.
    head2 = C.loadheader(frame)  # Loading the original header2.
    pos_mags = 12  # ([12,20,21,22])

    mag_r = U.get_data(reference, 12)
    mag_f = U.get_data(frame, 12)
    # good_sample = U.greater_equal(mag_r,16.) * U.less_equal(mag_r,21.5)
    good_sample = U.greater_equal(mag_r, 16.) * U.less_equal(mag_r, 19.)
    mag_r2, mag_f2 = U.multicompress(good_sample, (mag_r, mag_f))
    offset = U.mean_robust(mag_f2 - mag_r2)

    if plots:
        plt.figure(11, figsize=(12, 9), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(mag_r, (mag_f - mag_r - offset), 'ko', ms=10, alpha=0.1)
        plt.xlim(16, 25)
        plt.ylim(-5, 5.)
        plt.xlabel('AB', size=25)
        plt.ylabel('Mf-Mr', size=25)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
        plt.legend(['Offset: %.4f' % (offset)], loc='upper right', numpoints=1)
        plt.title(A.getfilename(frame), size=15)
        plt.grid()
        figurename = final[:-3] + 'png'
        print 'figurename: ', figurename
        plt.savefig(figurename, dpi=100)
        plt.close()

    # Here it saves the offset in an ASCII file
    fileout = open(final[:-3] + 'txt', 'w')
    linea = '%s %.5f \n' % (final, offset)
    fileout.write(linea)
    fileout.close()

    # The offset is only applied to m!=99. magnitudes.
    new_mags = U.where(abs(mag_f) < 99, mag_f - offset, mag_f)
    data2[:, pos_mags] = new_mags
    C.savedata(data2, final, dir="", header=head2)
    print ' '
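U.mean_robust is not shown in these snippets; a minimal sigma-clipping sketch of what such a robust mean could look like (an assumption, not the actual implementation), using numpy as N:

def mean_robust_sketch(x, nsigma=3., niter=3):
    # Iteratively discard points beyond nsigma standard deviations,
    # then return the mean of the surviving values.
    x = N.asarray(x, dtype=float)
    for _ in range(niter):
        mu, sigma = x.mean(), x.std()
        keep = N.abs(x - mu) <= nsigma * sigma
        if keep.all():
            break
        x = x[keep]
    return x.mean()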
Example #9
def flagging_dobledetections_mergecolumns(catalog):
    """
    This serves to append an extra column (each to both inputted catalogs)
    indicating either a detection was repeated and with the lowest S/N
    of the two.
    Sources flagged as 1 are those detections to be excluded when combining
    both catalogs into a single one.
--------
import alhambra_overlap as alhov
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_2_ISO.cat'
alhov.flagging_dobledetections_mergecolumns(cat2)    
    
    """
    
    data = coeio.loaddata(catalog)      # Loading the whole catalog content.
    head = coeio.loadheader(catalog)    # Loading the original header.
    nc = len(data.T)      # Number of columns.
    dim = len(data[:,0])  # Number of elements.
    print 'nc,dim',nc,dim
    
    var1 = head[-3].split()[-1]
    var2 = head[-2].split()[-1]
    if var1 == var2:
       print 'Duplicated columns. Merging information...'
       uno = data[:,72]
       dos = data[:,73]
       tres = uno+dos
       newdata = U.zeros((dim,nc-1),float)
       for ii in range(nc-1):
           for jj in range(dim):
               if ii == nc-2:  # The last column keeps the merged information.
                  newdata[jj,ii] = tres[jj]
               else:
                  newdata[jj,ii] = data[jj,ii]

       head2 = head[:-1]
       head2[-1]='#'
       outcat = catalog[:-4]+'.mergedcolumns.cat'
       coeio.savedata(newdata,outcat, dir="",header=head2)     # Saving and creating the new catalog.
                
       # Renaming files
       ap.renamefile(catalog,catalog+'.oldold.cat')
       if not os.path.exists(catalog): ap.renamefile(outcat,catalog)
Example #10
def remove_detections_bysegmmaps(field, pointing, ccd):
    """
    It uses the segmentation-maps to remove fake detections
    when masking out saturated stars.
----
import alhambra_fakedets as AF
AF.remove_detections_bysegmmaps(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    root2images = '/Volumes/amb22/imagenes/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    valor = U.zeros(dim)
    ima1 = root2images + 'f0%ip0%i_F814W_%i.swp.seg.fits' % (field, pointing,
                                                             ccd)
    ima2 = root2images + 'f0%ip0%i_F814W_%i.swp.segnomask.fits' % (
        field, pointing, ccd)
    segm1 = pyfits.open(ima1)[0].data
    segm2 = pyfits.open(ima2)[0].data
    for ii in range(dim):
        xo = int(round(x[ii]))
        yo = int(round(y[ii]))
        # Reconstructed logic: the original loop body referenced undefined
        # variables (datos, size, perc), apparently copied from the routine
        # below. Presumably a detection is kept when it still has a non-zero
        # value at its position in the masked segmentation-map.
        valor[ii] = segm1[yo, xo]

    # Defining the sample to be kept.
    good = U.greater(valor, 0)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="",
                   header=head)  # Saving & creating a new catalog.
Example #11
def remove_fakeabsorptions_F814W(field, pointing, ccd):
    """
    Using the rmsweight images, it gets rid of
    detections with imrs_F814W < 0.5.
    -------------------------------------------
import alhambra_fakedets as AF
AF.remove_fakeabsorptions_F814W(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.cat' % (field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    perc = U.zeros(dim)
    # Opening F814W Weight image
    ima = alh.alhambra_invrmsimagelist(field, pointing, ccd)[-1]
    datos = pyfits.open(ima)[0].data
    for ii in range(dim):
        if area[ii] > 1:
            size = int(round(U.sqrt(area[ii]) / 2.))
            xo = x[ii]
            yo = y[ii]
            dimx = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[1]
            dimy = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[0]
            perc[ii] = (datos[yo - size:yo + size, xo - size:xo + size].sum() /
                        (dimx * dimy * 1.))

    # Defining the sample to be kept.
    good = U.greater(perc, 0.5)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="",
                   header=head)  # Saving & creating a new catalog.
Example #12
def get_alhambra_GOLD(field,pointing,ccd):
    """

import alhambragold as alhgold
alhgold.get_alhambra_GOLD(2,1,1)

    
    """

    root_catalogs = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/'%(field)
    root_gold = '/Volumes/amb22/catalogos/reduction_v4f/GOLD/'
    catalog = root_catalogs+'alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' %(field,pointing,ccd)
    if os.path.exists(catalog):
       data1 = coeio.loaddata(catalog)      # Loading the whole catalog content.
       head1 = coeio.loadheader(catalog)    # Loading the original header.
       nc1 = len(data1.T)
       dim1 = len(data1[:,0])
       nh = len(head1)
       
       # Final catalog. 
       catout = root_gold+'alhambra.gold.F0%iP0%iC0%i.ColorProBPZ.cat' %(field,pointing,ccd)
       outfile = open(catout,'w')
       
       # Reducing the length of the catalogs according to input ids
       ids = U.get_str(catalog,0)
       mo  = U.get_data(catalog,65)
       cond1 = U.less(mo,23.000)
       
       data2 = data1[cond1,:]
       nraws = U.shape(data2)[0]
       ncols = U.shape(data2)[1]

       # Setting the IDs to its final values (including F814W+field+pointing+ccd)
       finalids = alh.getalhambrafinalids(field,pointing,ccd,'ISO')
       finalids2 = U.compress(cond1,finalids)
       
       # Restoring header...
       for ii in range(nh):
           outfile.write('%s \n'%(head1[ii]))
           
       formato = '%s  %i  %i  %i  %.4f  %.4f  %.3f  %.3f  %i  %.2f  %.2f  %.4f  %.3f  %.3f  %.1f  %.2f  %.3f  %.2f  %i  '
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '
       formato += '%.3f  %.3f  %.3f  '
       formato += '%i  %i  %.3f  %i  %.2f  %i  '
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '
       formato += '%.3f  %.3f  %.3f  %.3f  '
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '  
       formato += '%.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  %.3f  '  
       formato += '%.3f  %.3f  %.3f  %.3f  %i  %i  '                              
       form = formato.split()
       
       # Here it defines the format to be used.    
       for jj in range(nraws):
           for ss in range(ncols):
               goodform = ''
               goodform = form[ss]+'  '
               if ss == 0:
                  outfile.write(goodform%(int(finalids2[jj]))) 
               else:
                  outfile.write(goodform%(data2[jj,ss]))
           outfile.write(' \n')    
           
       outfile.close()
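One fragile point above is that the number of format tokens in formato must match ncols exactly; a small defensive check one could add before the writing loop (a suggested addition, not in the original):

# Every catalogue column needs its own format token.
if len(form) != ncols:
    raise ValueError('format tokens (%i) != catalogue columns (%i)'
                     % (len(form), ncols))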
Example #13
import numpy as N
import coeio as C
import useful as U

root_to_cat = '/Users/albertomolino/doctorado/articulos/SPLUS/'
root_to_cat += 'calibration/sample4simulations/data/20180329/NGSL_convolved/'
#catalog = root_to_cat + 'SPLUS_SDSS_filters_4Laura.list_sample_stars_'
#catalog += 'SDSSDR10_aa_chi2_red_min.eq.0.03_20102017.cat'
catalog = root_to_cat + 'sample_NGSL_convolved_29032018.dat'
new_catalog = catalog[:-3]+'extended.cat'

# display information?
verbose = 0

# Reading the catalogue.
datos = C.loaddata(catalog)
head = C.loadheader(catalog)

# Reading catalog dimensions.
n_stars_ori = N.shape(datos)[0]
n_cols = N.shape(datos)[1]
n_filters = 12 #20  #####################################

# Magnitude range we want to expand.
m_min = 12  ####################################
m_max = 21  ####################################
delta_m = 0.01  ####################################
base_m = N.arange(m_min,m_max+delta_m,delta_m)
n_m_ele = len(base_m)

# Number of stars per magnitude-bin.
n_stars_final = 200 ####################################
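The snippet ends just before the expansion itself; a minimal sketch of one plausible continuation (an assumption, not the original code), which draws n_stars_final stars per magnitude step and rigidly shifts their magnitudes so the colours are preserved:

# Hypothetical continuation: column 0 is taken as the reference filter.
new_rows = []
for m in base_m:
    picks = N.random.randint(0, n_stars_ori, n_stars_final)
    for idx in picks:
        row = datos[idx, :].copy()
        shift = m - row[0]          # rigid shift preserves the colours
        row[0:n_filters] += shift   # apply it to all filter magnitudes
        new_rows.append(row)
C.savedata(N.array(new_rows), new_catalog, dir="", header=head)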
Example #14
def check_variable_candidate(alhambraid):
    """
    It replaces failed magnitudes in the ALHAMBRA catalogues (artificial absorptions,
    non-observed sources assigned as non-detected with upper limits) by m=-99,em=99
    It might decrease the amount of low Odds at bright magnitudes.
----
import alhambra_photools as A
A.replacing_fakeabsorptions(2,1,2)

A.check_sample(image,catalog,posID,posX,posY,posMAG)
A.alhambra_id_finder(ra1,dec1)
    idd = int(id[pos])

A.alhambra_colorstamp_byID(id)

f,p,c,ids = A.alhambra_id_finder(37.4992,1.2482)
        
    """

    field = int(str(alhambraid)[3])
    pointing = int(str(alhambraid)[4])
    ccd = int(str(alhambraid)[5])

    numero = str(alhambraid)[-5:]
    print numero
    for ii in range(2):
        if numero[0] == '0': numero = numero[1:]
    print numero

    root2cats = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root2cats + 'originals/f0%ip0%i_colorproext_%i_ISO.cat' % (
        field, pointing, ccd)

    cols1 = root2cats + 'f0%ip0%i_%i_tot_ISO_eB10.columns' % (field, pointing,
                                                              ccd)
    cols2 = root2cats + 'f0%ip0%i_colorproext_%i_ISO_phz_eB10.columns' % (
        field, pointing, ccd)
    if os.path.exists(cols1): columns = cols1
    else: columns = cols2

    filters = B.get_filter_list(columns)
    print filters

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    m = A.get_magnitudes(catalog, columns)
    # em   = get_errmagnitudes(catalog,columns)

    root2 = '/Volumes/amb22/catalogos/reduction_v4e/'
    fluxc1 = root2 + 'f0%i/f0%ip0%i_colorproext_%i_ISO_phz_eB11.flux_comparison' % (
        field, field, pointing, ccd)
    fluxc2 = root2 + 'f0%i/f0%ip0%i_colorproext_%i_ISO.flux_comparison' % (
        field, field, pointing, ccd)
    if os.path.exists(fluxc1): fluxc = fluxc1
    else: fluxc = fluxc2
    ido, ftt, foo, efoo, zb, tb, mm = P.get_usefulfluxcomparison(
        columns, fluxc)

    pos = A.get_position(ido, int(numero))
    print ido[pos], mm[pos]

    plt.figure(1, figsize=(10, 7), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    # P.plot1sedfitting(foo[:,jj],efoo[:,jj],ftt[:,jj],zb[jj],tb[jj],root_bpz_sed+'eB11.list',filters)
    plt.plot(U.arange(20) + 1, foo[0:20, pos], 'k-', alpha=0.4, lw=6)
    plt.plot(U.arange(20) + 1, foo[0:20, pos], 'ko', alpha=0.4, ms=12)
    plt.errorbar(U.arange(20) + 1,
                 foo[0:20, pos], (efoo[0:20, pos] / 1.),
                 fmt="ko",
                 alpha=0.4,
                 ms=10)
    minf = (foo[0:20, pos].min()) * 1.1
    maxf = (foo[0:20, pos].max()) * 1.1
    maxef = (efoo[0:20, pos].max()) * 1.1
    # plt.ylim(minf-maxef,maxf+maxef)
    plt.xlim(0, 21)
    plt.xlabel('Filter', size=25)
    plt.ylabel('Flux', size=25)
    plt.legend(['Magnitude: %.2f' % (m[pos][-1])],
               loc='upper right',
               numpoints=1,
               fontsize=20)
    plt.title(alhambraid, size=25)
    plt.grid()
    plt.show()
    namefig = '/Users/albertomolino/doctorado/photo/variability/analysis/vocheck.ID%s.png' % (
        alhambraid)
    plt.savefig(namefig, dpi=125)

    outcat = '/Users/albertomolino/doctorado/photo/variability/analysis/vocheck.ID%s.cat' % (
        alhambraid)
    A.select_rows_bylist_pro(catalog, ido[pos], outcat)
    print ' '
Example #15
mastercat = root2cat + 'S82_master_gals.cat'
"""
# This loop was needed for the original version
# where the FIELDS were strings rather than integers.
tiles_name = U.get_str(mastercat,0)
nt = len(tiles_name)
tiles = N.zeros(nt)
for ss in range(nt):
    tiles[ss] = int(tiles_name[ss].split('-')[-1])
"""
tiles = U.get_data(mastercat, 0)  # use get_str with original cat!
single_tiles = N.unique(tiles).astype(int)
n_single_tiles = len(single_tiles)

# Reading entire catalogue.
header_mastercat = C.loadheader(mastercat)
data_mastercat = C.loaddata(mastercat)

for ii in range(n_single_tiles):
    tile_cat = root2cat + 'tile%i_S82.cat' % (single_tiles[ii])
    good = N.equal(tiles, single_tiles[ii])
    n_gals = len(data_mastercat[good, 1])
    if not os.path.exists(tile_cat):
        """
       fwhm = data_mastercat[good,9]
       magr = data_mastercat[good,82] # Petro
       seeing,stars = sct.get_seeing_from_data_pro(fwhm,magr)
       fwhm_norm = fwhm /(1.*seeing)
       data_mastercat[good,9] = fwhm_norm
       data_mastercat[good,0] = N.ones(n_gals) * int(single_tiles[ii])
       """
Example #16
def replace_fakeabsorptions_pro(field, pointing, ccd):
    """
    Updated version to get rid of detections with imrs_* < 0.5,
    setting their magnitudes to m=-99, em=0.0.
    Additionally, it removes detections with imrs_F814W == 0.0.
    ----------------------------------------------------------------------------
    It replaces failed magnitudes in the ALHAMBRA catalogues (artificial
    absorptions, and non-observed sources assigned as non-detected with
    upper limits) by m=-99, em=99. It might decrease the amount of low
    Odds at bright magnitudes.
----
import alhambra_photools as A
A.replace_fakeabsorptions_pro(2,1,2)
    
    """
    plots = 1

    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    catweight = root + 'f0%ip0%i_ColorProBPZ_%i_ISO.rmsweights.dat' % (
        field, pointing, ccd)
    dataweight = coeio.loaddata(catweight)
    # print U.shape(dataweight)
    # ids,x,y,area = U.get_data(catalog,(0,3,4,5))
    cols1 = root + 'f0%ip0%i_%i_tot_ISO_eB10.columns' % (field, pointing, ccd)
    cols2 = root + 'f0%ip0%i_colorproext_%i_ISO_phz_eB10.columns' % (
        field, pointing, ccd)
    if os.path.exists(cols1): columns = cols1
    else: columns = cols2

    data = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)  # Loading the original header.
    vars, evars, posref, zpe, zpo = alh.get_usefulcolumns(columns)
    # print 'dim vars', len(vars)
    mags = data[:, vars]

    nl = U.shape(data)[0]  # nl is the number of detections in every single band.
    nf = len(vars)  # nf is the number of bands inside the catalog.
    # print 'nl,nf: ',nl,nf

    kk = 0
    for jj in range(nl):
        filtoff = 0
        for ii in range(nf):
            pos_mag = vars[ii]
            pos_emag = evars[ii]
            if dataweight[jj, ii] < 0.5:
                # print data[jj,pos_mag],pos_mag,pos_emag
                data[jj, pos_mag] = -99.0000
                data[jj, pos_emag] = 0.0000
                data[jj, 67] -= 1
                kk += 1
                filtoff += 1
                # print data[jj,0]
                # print data[jj,pos_mag]

        # if filtoff > 0:
        #    print '%i excluded for detection %i: '%(filtoff,data[jj,0])
        #    # pausa = raw_input('paused')

    print 'Replaced %i magnitudes. ' % (kk)
    # The new magnitude-error values now overwrite the original data.
    finalcatalog = root + 'f0%ip0%i_colorproext_%i_ISO.test.cat' % (
        field, pointing, ccd)
    coeio.savedata(data, finalcatalog, dir="",
                   header=head)  # Saving & creating a new catalog.
Example #17
import numpy as N
import coeio as C
import useful as U

root_to_cat = '/Users/albertomolino/doctorado/articulos/SPLUS/'
root_to_cat += 'calibration/sample4simulations/data/20180329/NGSL_convolved/'
catalog = root_to_cat + 'sample_final_models_convolved_04042018.cat'
catalog_sdss_colors = root_to_cat + 'IveReal_GR_normed.cat'
models = U.get_str(root_to_cat + 'ngsl_pick_wd.list', 0)
new_catalog = catalog[:-3] + 'extended_gr.cat'

# display information?
verbose = 0

# Reading the general catalogue.
datos_main = C.loaddata(catalog)
head_main = C.loadheader(catalog)

# gr synthetic models
#gr_models = datos_main[:,6]-datos_main[:,8] #SPLUS
gr_models = datos_main[:, 16] - datos_main[:, 17]  #SDSS

# Reading the SDSS_color catalog.
sdss_gr, density_gr = U.get_data(catalog_sdss_colors, (0, 1))
n_color_bins = len(sdss_gr) - 1

# Reading catalog dimensions.
n_stars_ori = N.shape(datos_main)[0]
n_cols = N.shape(datos_main)[1]
n_filters = n_cols - 1  #12 #20  #####################################

# Magnitude range we want to expand.
Example #18
import os

import numpy as N
import coeio as C
import useful as U

root2bpz = '/Users/albertomolino/codigos/bpz-1.99.2/'
#spectra = 'COSMOSeB11new_recal'
spectra = 'eB11'

# J-PLUS related data
mainroot = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/'
root2cats = mainroot + 'splus_cats_NGSL/JPLUS_specz_sample_March2018/'

jplus_spz_cat = root2cats + 'jplus2sdssdr12_z05.cat'
jplus_spz_columns = root2cats + 'jplus2sdssdr12_z05.columns'
tiles = U.get_data(jplus_spz_cat, 1)
single_tiles = N.unique(tiles).astype(int)
n_single_tiles = len(single_tiles)

# Reading entire catalogue.
header_jplus = C.loadheader(jplus_spz_cat)
data_jplus = C.loaddata(jplus_spz_cat)
#C.savedata(data1[sample,:],outname,dir="",header=head1)

#n_single_tiles = 1
for ii in range(n_single_tiles):
    tile_jplus_spz_upl_cat = root2cats + 'tile_%i_jplus2sdssdr12_z05.ecor.upp.cat' % (
        single_tiles[ii])
    good = N.equal(tiles, single_tiles[ii])
    n_gals = len(data_jplus[good, 0])
    if n_gals > 50:

        if not os.path.exists(tile_jplus_spz_upl_cat):
            tile_jplus_spz_cat = root2cats + 'tile_%i_jplus2sdssdr12_z05.cat' % (
                single_tiles[ii])
            # Individual (tile) catalogue.