Example No. 1
def remove_detections_bysegmmaps(field, pointing, ccd):
    """
    It uses the segmentation maps to remove fake detections
    introduced when masking out saturated stars.
----
import alhambra_fakedets as AF
AF.remove_detections_bysegmmaps(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    root2images = '/Volumes/amb22/imagenes/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    valor = U.zeros(dim)
    ima1 = root2images + 'f0%ip0%i_F814W_%i.swp.seg.fits' % (field, pointing,
                                                             ccd)
    ima2 = root2images + 'f0%ip0%i_F814W_%i.swp.segnomask.fits' % (
        field, pointing, ccd)
    segm1 = pyfits.open(ima1)[0].data
    segm2 = pyfits.open(ima2)[0].data
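    # segm1: segmentation map after masking saturated stars ('seg');
    # segm2: segmentation map without masking ('segnomask').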
    for ii in range(dim):
        # SExtractor coordinates are 1-indexed; shift to array indices.
        xo = int(round(x[ii])) - 1
        yo = int(round(y[ii])) - 1
        # Assumed criterion: a detection is kept if its central pixel is still
        # assigned to an object in the masked segmentation map (segm1); pixels
        # masked out around saturated stars are zero there.
        valor[ii] = segm1[yo, xo]

    # Defining the sample to be kept.
    good = U.greater(valor, 0)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="",
                   header=head)  # Saving & creating a new catalog.
Example No. 2
def remove_fakeabsorptions_F814W(field, pointing, ccd):
    """
    Using the rms-weight images, it gets rid of
    detections with irms_F814W < 0.5.
    -------------------------------------------
import alhambra_fakedets as AF
AF.remove_fakeabsorptions_F814W(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.cat' % (field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    perc = U.zeros(dim)
    # Opening F814W Weight image
    ima = alh.alhambra_invrmsimagelist(field, pointing, ccd)[-1]
    datos = pyfits.open(ima)[0].data
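    # For each detection, perc will hold the mean weight-map value inside a
    # square stamp of roughly the detection's area; detections on poorly
    # exposed regions (perc <= 0.5) are discarded below.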
    for ii in range(dim):
        if area[ii] > 1:
            size = int(round(U.sqrt(area[ii]) / 2.))
            xo = int(round(x[ii]))
            yo = int(round(y[ii]))
            dimx = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[1]
            dimy = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[0]
            perc[ii] = (datos[yo - size:yo + size, xo - size:xo + size].sum() /
                        (dimx * dimy * 1.))

    # Defining the sample to be kept.
    good = U.greater(perc, 0.5)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="",
                   header=head)  # Saving & creating a new catalog.
Example No. 3
def match_spz_sample(cluster): # TO CHECK
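    # NOTE: catalog1 (spectroscopic sample) and catalog2 (ColorPro catalogue)
    # are assumed to be defined elsewhere from `cluster`; they are not set in
    # this snippet.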
       
    finalcat1 = catalog2[:-3]+'CLASH.redu.cat'
    finalcat2 = catalog2[:-3]+'nada.cat'
    # if not os.path.exists(finalcat1):
    if not os.path.exists(finalcat2):
        # print 'Final catalog does not exist yet.'                           
        if os.path.exists(catalog1) and os.path.exists(catalog2):
            # It matches detections up with the spectroscopic sample.
            # Reading specz catalog
            print 'Reading info1 before matching...'
            speczsample = catalog1
            idsp,xsp,ysp = U.get_data(speczsample,(0,3,4))
            goodsp = U.greater_equal(xsp,1500) * U.less_equal(xsp,3500)
            goodsp *= U.greater_equal(ysp,1500) * U.less_equal(ysp,3500)
            idsp,xsp,ysp = U.multicompress(goodsp,(idsp,xsp,ysp))
            print 'New dimension for specz catalogue: ',len(xsp)
            # rasp,decsp,xsp,ysp,zsp = get_data(speczsample,(0,1,2,3,4))
            # xsp,ysp,zsp = get_data(speczsample,(1,2,7))
            ####### idsp = U.arange(len(xsp))+1 
            # idsp = arange(len(rasp))+1
            # Reading ColorPro catalog
            print 'Reading info2 before matching...'
            idcol,xcol,ycol = U.get_data(catalog2,(0,3,4))
            print 'Dimension for input catalogue before compressing: ',len(idcol)
            gsp = U.greater_equal(xcol,1500) * U.less_equal(xcol,3500)
            gsp *= U.greater_equal(ycol,1500) * U.less_equal(ycol,3500)
            idcol,xcol,ycol = U.multicompress(gsp,(idcol,xcol,ycol))
            print 'Dimension for input catalogue after compressing: ',len(idcol)
            # Using "matching_vects" to match up samples...
            print 'Matching samples....'
            pepe = CT.matching_vects(idcol,xcol,ycol,idsp,xsp,ysp,1.1)   # We now use X,Y instead of RA,Dec
            # Compressing matches for ColorPro...
            print 'Compressing matches...'
            matchidcol = pepe[:,0].astype(int)
            gdet_col = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
            matchidcol = U.compress(gdet_col,(matchidcol))
            # Compressing matches for Spectroscopic...
            matchidsp = pepe[:,1].astype(int)
            gdet_spz = U.greater(matchidsp,0)   # Excluding 0's (non matched detections)
            matchidsp = U.compress(gdet_spz,(matchidsp))
            print 'len(idcol)',len(idcol)
            print 'len(idsp)',len(idsp)
            if len(matchidcol) == len(matchidsp):
                print 'Creating idredu & zsredu '
                print 'Dimension of matchidsp ',len(matchidsp)
                idredu = U.zeros(len(matchidsp))
                idspredu = U.zeros(len(matchidsp))
                for ii in range(len(matchidsp)):
                    colindex = A.id2pos(idcol,matchidcol[ii]) # Position for Index idcol
                    spzindex = A.id2pos(idsp,matchidsp[ii])   # Position for Index idsp
                    idredu[ii] = idcol[colindex]  # ID for ColorPro
                    idspredu[ii] = idsp[spzindex]    # ID for the spectroscopic sample
                    
                # A new smaller catalog will be created containing specz info as an extra column.
                print 'Selecting by rows... ' 
                finalcat1 = catalog2[:-3]+'UDF.redu.cat'
                finalcat2 = catalog2[:-3]+'CLASH.redu.cat'
                U.put_data(catalog2[:-3]+'idsfrommatch.txt',(idredu,idspredu))
                A.select_rows_bylist_sorted(catalog1,idspredu,finalcat1)
                A.select_rows_bylist_sorted(catalog2,idredu,finalcat2)               
Example No. 4
def correct_SExt_uncertainties(cluster):
# (catalog,columns,zpts,gains,area2rms,weightimas,arinarout,finalcat):
    """
    It reads the input catalogue and empirically corrects
    its photometric errors using direct area-vs-sigma estimations.
    It returns the name of the new, corrected catalogue.
    --
    *** THE POSITIONS OF AREA, MAGS & ERRMAGS NEED TO BE CHECKED BEFORE RUNNING IT.
    ----------------------------------------------------------------------------
    REQUIREMENTS:
      - An input catalogue (catalog)
      - Its corresponding COLUMNS file (columns)
      - A list with all ZPS (2nd column) (zpts)
      - A list with all GAINS (2nd column) (gains)
      - A list with all area2sigma files (1 per band) (area2rms)
      - A list with all WEIGHT-MAPS (weightimas)
    """
    # If weight = 1, it uses the Weight-maps to calculate photo.uncertainties.
    weight = 0
    # If verbose = 1, additional information is displayed during the analysis.
    verbose = 0
    # If check1/check2 = 1, several check figures are plotted.
    check1 = 1
    check2 = 1
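    # NOTE: 'finalroot' is assumed to be a module-level path variable; it is
    # not defined inside this function.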

    catalog = finalroot +'/%s/images/%s.photo.cat'%(cluster,cluster)
    columns = finalroot +'/%s/images/%s.photo.columns'%(cluster,cluster)
    scimas  = finalroot+'/%s/images/sci.list'%(cluster)
    weightimas = finalroot+'/%s/images/wht.list'%(cluster)
    zpts = U.get_data(finalroot+'/%s/images/%s.zpt.cat'%(cluster,cluster),0)
    gains = get_JPLUS_gains(scimas)

    # Name for the final corrected catalogue
    newphotcat = finalroot +'/%s/images/%s.photo.err.cat'%(cluster,cluster)

    # To account for differences in exposure time, we need a list with ALL WEIGHT-maps.
    if weight: wimas = U.get_str(weightimas,0)

    if verbose: print 'Catalog: ',catalog
    if os.path.exists(catalog):
       mm  = get_magnitudes(catalog,columns)
       em = get_errmagnitudes(catalog,columns)
       xx,yy,aper = U.get_data(catalog,(3,4,5))
       data = C.loaddata(catalog)       # Loading the whole catalog content.
       head = C.loadheader(catalog)     # Loading the original header.
       zps  = U.get_data(zpts,1)        # Loading Zeropoint values
       # gain = U.get_data(gains,1)     # Loading Gain Values.
       filters = get_filters(columns)   # It gets the filter names for plots.
       # Defining new variables
       ng = len(mm[:,0])                # ng is the number of galaxies.
       nf = len(mm[0,:])                # nf is the number of filters.
       if verbose: print 'ng,nf',ng,nf
       errmag = N.zeros((ng,nf),float)  # Where the new photo errors will be saved.
       # Starting the game..
       for jj in range(nf):
           if verbose: print 'Analyzing filter %i'%(jj+1)
           # For every single band we need to read the apert_v_sigma file (area2rms)
           # to interpolate the real values of area from the catalog.
           # print 'area2rms[%i]'%(jj),area2rms
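           # 'area2rms' (one aperture-vs-sigma file per band) is assumed to be
           # defined elsewhere, as in the commented-out signature above.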
           area2rms_bands = U.get_str(area2rms,0)
           sqrtarea, sbackg, smean = U.get_data(area2rms_bands[jj],(0,1,2))
           rmsfit  = N.poly1d(N.polyfit(sqrtarea, sbackg, 3)) # CHECK
           meanfit = N.poly1d(N.polyfit(sqrtarea, smean, 2))  # CHECK
           
           # It reads the WEIGHT-map if requested
           if weight:
              if verbose: print 'Reading Weight image: ', wimas[jj] 
              wdata = fits.open(wimas[jj])[0].data
              wdatanorm = wdata / wdata.max()

           if check1:
              # Sanity plot to assure the interpolated area2sigma function was right   
              plt.figure(0, figsize = (7,6),dpi=70, facecolor='w', edgecolor='k')
              plt.clf()
              plt.plot(sqrtarea,sbackg,'ko',sqrtarea,rmsfit(sqrtarea),'r-',linewidth=2)
              plt.xlabel('$\sqrt{N}$',size=18)
              plt.ylabel('$\sigma$',size=20)
              plt.legend(['Data','Interpolation'],numpoints=1,loc='upper left') 
              plt.grid()
              plt.savefig(catalog[:-3]+'.%s.Ar2Si.check.png'%(filters[jj]),dpi=80)

              # Sanity plot to assure the interpolated area2sigma function was right   
              # plt.figure(2, figsize = (7,6),dpi=70, facecolor='w', edgecolor='k')
              plt.clf()
              plt.plot(sqrtarea, smean,'ko',sqrtarea, meanfit(sqrtarea),'r-',linewidth=2)
              plt.xlabel('$\sqrt{N}$',size=18)
              plt.ylabel('$mean$',size=20)
              plt.legend(['Data','Interpolation'],numpoints=1,loc='upper left') 
              plt.grid()
              plt.savefig(catalog[:-3]+'.%s.Ar2Mean.check.png'%(filters[jj]),dpi=80)

           fluxgal = B.mag2flux(mm[:,jj]-zps[jj]) # -meansignal+U.sqrt(fluxcorr4aperture)
           # good_sample = U.less_equal(abs(mm[:,jj]),30) # good sample
           # bad_sample  = U.greater(abs(mm[:,jj]),30)    # bad sample

           # Values used to estimate the mag errors.
           sqar = N.sqrt(aper)
           sigback = rmsfit(sqar)
           meansignal = meanfit(sqar)
           
           # There will be non-detected galaxies
           # with m=99 magnitudes.
           # Those numbers should not change here.
           detected     = U.less_equal(abs(mm[:,jj]),30)  # good sample
           nondetected  = U.greater(abs(mm[:,jj]),30)    # bad sample
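
           # For reference, SExtractor's magnitude error is
           #   MAGERR = 1.0857 * sqrt(A*sigma**2 + F/gain) / F,
           # with A the aperture area, sigma the background rms and F the flux;
           # the expression below is the author's variant, using the
           # empirically calibrated sigma.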
           
           # Photom. error (as defined by SExtractor) but using the new sigma value!
           gain = gains[jj]  # assumed: one gain per band, as returned by get_JPLUS_gains
           if weight:
               pixw = N.zeros(ng)
               for hhh in range(ng): pixw[hhh] = wdatanorm[int(yy[hhh])-1, int(xx[hhh])-1]
               fluxcor = fluxgal*pixw
               newerror = N.sqrt((aper*sigback*sigback/fluxcor**2)+(fluxcor*gain))
               newerror *= 1.0857
           else:
               newerror = N.sqrt((aper*sigback*sigback/fluxgal**2)+(fluxgal*gain))
               newerror *= 1.0857
               
           # Assessing new uncertainties.
           errmag[detected,jj]    = newerror[detected] 
           errmag[nondetected,jj] = em[nondetected,jj]
           
           # A new figure is created to compare SExtractor vs empirical uncertainties.
           if check2:       
              line = N.arange(16.,30.,0.25)
              SExline  = U.bin_stats(mm[:,jj],em[:,jj],line,stat='mean_robust') 
              aperline = U.bin_stats(mm[:,jj],errmag[:,jj],line,stat='mean_robust')   
              # plt.figure(1,figsize = (8,7),dpi=70, facecolor='w', edgecolor='k')
              plt.clf()
              plt.plot(mm[:,jj],em[:,jj],'r+',mm[:,jj],errmag[:,jj],'k+')
              plt.plot(line,SExline,'-ro',line,aperline,'-ko',linewidth=6,alpha=0.2)
              plt.legend(['$SExtractor$','$Apertures$'],numpoints=1,loc='upper left')
              plt.xlabel('$Mags$',size=17)
              plt.ylabel('$ErrMags$',size=17)
              plt.xlim(17.,30.)
              plt.ylim(0.,1.0)
              plt.grid()
              plt.savefig(catalog[:-3]+'.%s.uncert.comparison.png'%(filters[jj]),dpi=80)
              
       # The new mag-error values now overwrite the original data.
       vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)
       data[:,evars] = errmag[:,N.arange(nf)]
       C.savedata(data,newphotcat, dir="",header=head)   # Saving and creating the new catalog.
Example No. 5
def appending_ids2catalogues(field, pointing, ccd):
    """

import alhambra_3arcs as A3
A3.appending_ids2catalogues(2,1,1)


    """

    catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, field, pointing, ccd)
    idalh = U.get_str(catalhambra, 0)
    idalh2 = U.arange(len(idalh)) + 1
    xalh, yalh = U.get_data(catalhambra, (6, 7))

    cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % (
        field, field, pointing, ccd)
    id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4))
    print len(id3arcs)

    matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
    if not os.path.exists(matchfile):
        idcol = idalh2
        xcol = xalh
        ycol = yalh
        idsp = id3arcs
        xsp = x3arcs
        ysp = y3arcs

        pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5)

        # Compressing matches for ColorPro...
        print 'Compressing matches...'
        matchidcol = pepe[:, 0].astype(int)
        gdet_col = U.greater(matchidcol,
                             0)  # Excluding 0's (non matched detections)
        matchidcol = U.compress(gdet_col, (matchidcol))
        # Compressing matches for Spectroscopic...
        matchidsp = pepe[:, 1].astype(int)
        gdet_spz = U.greater(matchidsp,
                             0)  # Excluding 0's (non matched detections)
        matchidsp = U.compress(gdet_spz, (matchidsp))
        print 'len(idcol)', len(idcol)
        print 'len(idsp)', len(idsp)
        if len(matchidcol) == len(matchidsp):
            print 'Creating idredu & zsredu '
            print 'Dimension of matchidsp ', len(matchidsp)
            idredu = U.zeros(len(matchidsp))
            idspredu = U.zeros(len(matchidsp))
            for ii in range(len(matchidsp)):
                colindex = A.id2pos(idcol,
                                    matchidcol[ii])  # Position for Index idcol
                spzindex = A.id2pos(idsp,
                                    matchidsp[ii])  # Position for Index idsp
                idredu[ii] = idcol[colindex]  # ID for ColorPro
                idspredu[ii] = idsp[spzindex]  # ID from the 3-arcsec catalogue

            matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
            U.put_data(matchfile, (idredu, idspredu))

    if os.path.exists(matchfile):
        pepa = open(matchfile[:-3] + 'bis.cat', 'w')
        idredu, idspredu = U.get_data(matchfile, (0, 1))
        i11 = idredu.astype(int) - 1
        i22 = idspredu.astype(int)
        lista = []
        for ii in range(len(i11)):
            lista.append(idalh[i11[ii]])
            pepa.write('%s  %s  \n' % (idalh[i11[ii]], i22[ii]))
        pepa.close()

        finalfinal = cat3arcs[:-3] + 'final.cat'
        if os.path.exists(finalfinal): A.deletefile(finalfinal)
        if not os.path.exists(finalfinal):
            print 'Preparing ', finalfinal
            idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0)
            append_IDs2_3arcs_catalogues(cat3arcs, idsa)
Example No. 6
def figura33(lista):
    """

    I'm using the stellar classification from version_e.
    ----
import alhambra_completeness as alhc
lista = '/Volumes/amb22/catalogos/reduction_v4d/globalcats/lista.list'
alhc.figura33(lista)

    """
    blue = 0
    red = 1
    cats = U.get_str(lista, 0)
    cats2 = U.get_str(lista, 1)
    nc = len(cats)
    dx = 0.2
    dy = 0.4
    nxbins = 4
    nybins = 2
    ods = 0.05
    mmin = 16.0
    mmax = 23.75
    zbmin = 0.0001
    zbmax = 1.4
    Mmin = -24
    Mmax = -17
    if red:
        Tbmin = 1  # 7.
        Tbmax = 5  # 11.
        resolmag = 0.2  # 0.2
        resolz = 0.05
    if blue:
        Tbmin = 7.
        Tbmax = 11.
        resolmag = 0.2
        resolz = 0.05

    resol = 0.025
    areas = ([0.45, 0.47, 0.23, 0.24, 0.47, 0.47, 0.46, 2.79])

    plt.figure(111, figsize=(21.5, 11.5), dpi=70, facecolor='w', edgecolor='k')
    ss = 0
    for jj in range(nybins):
        for ii in range(nxbins):
            # Reading data from catalogs.
            mo, zb, tb, odds, m814 = U.get_data(cats[ss], (81, 72, 75, 76, 62))
            sf = U.get_data(cats2[ss], 71)
            # mo,zb,tb,sf,odds,m814 = U.get_data(cats[ss],(81,72,75,71,76,62))
            g = U.greater_equal(abs(m814), mmin) * U.less_equal(
                abs(m814), mmax)
            # g* = U.greater_equal(odds,ods)
            g *= U.greater_equal(tb, Tbmin) * U.less_equal(tb, Tbmax)
            g *= U.less_equal(sf, 0.8)
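            # Magnitude-dependent odds threshold (empirical linear cut).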
            yy = -0.014 * m814 + 0.38
            g *= U.greater(odds, yy)
            g *= U.less_equal(mo, Mmax + resol) * U.greater(mo, Mmin - resol)
            g *= U.greater(zb, zbmin) * U.less_equal(zb, zbmax)
            mo, zb, tb, odds = U.multicompress(g, (mo, zb, tb, odds))
            print 'dimension', len(mo)
            # Plotting density.
            # cuadrado = plt.axes([.1+(ii*dx),.1+((nybins-jj-1)*dy),dx,dy])
            if ii == nxbins - 1:
                cuadrado = plt.axes([
                    .1 + (ii * dx), .1 + ((nybins - jj - 1) * dy),
                    dx + (dx * 0.2), dy
                ])
            else:
                cuadrado = plt.axes(
                    [.1 + (ii * dx), .1 + ((nybins - jj - 1) * dy), dx, dy])
            matrix, axis2, axis1 = rs.CC_numberdensity_contour_zvolume(
                zb, mo, resolz, resolmag, 1)
            if blue:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-11.,
                             vmax=-7.)  # blue galaxies
            if red:
                plt.contourf(axis2,
                             axis1,
                             U.log10(matrix / areas[ss]),
                             250,
                             vmin=-12.,
                             vmax=-7.65)  # red galaxies

            if ii == nxbins - 1:
                aa = plt.colorbar(pad=0., format='%.1f')
                aa.set_label('Log. Density [N/Mpc$^{3}$/deg$^{2}$]', size=18)
            if jj != nybins - 1: plt.setp(cuadrado, xticks=[])
            if ii != 0: plt.setp(cuadrado, yticks=[])
            if jj == nybins - 1:
                plt.xlabel('M$_{B}$', size=27)
                plt.xticks(fontsize=17)
            if ii == 0:
                plt.ylabel('redshift', size=28)
                plt.yticks(fontsize=17)

            # plotting axis manually
            base1 = U.arange(Mmin, Mmax + 1., 1.)
            base2 = U.arange(0, zbmax + (2. * resol), resol)
            dim1 = len(base1)
            dim2 = len(base2)
            for rr in range(dim1):
                plt.plot(base2 * 0. + base1[rr],
                         base2,
                         'k--',
                         linewidth=1.,
                         alpha=0.25)
            for rr in range(dim2):
                plt.plot(base1,
                         base1 * 0. + base2[rr],
                         'k--',
                         linewidth=1.,
                         alpha=0.25)

            # plt.grid()
            plt.ylim(zbmin + 0.0001, zbmax - 0.001)
            plt.xlim(Mmin + 0.0001, Mmax - 0.0001)
            if ss == 7: labelleg = 'Global'
            else: labelleg = 'A%i' % (ss + 2)
            xypos = (Mmax - 1.6, zbmax - 0.18)
            if ss == 7: xypos = (Mmax - 3.5, zbmax - 0.18)
            plt.annotate(labelleg, xy=xypos, fontsize=40, color='black')
            ss += 1

    plt.savefig('completeness.alhambra.png', dpi=200)
Example No. 7
def flagging_dobledetections(cat1,cat2):
    """
    This serves to append an extra column (one to each of the two input catalogs)
    indicating whether a detection is repeated and is the one with the lower S/N
    of the two.
    Sources flagged as 1 are the detections to be excluded when combining
    both catalogs into a single one.
--------
import alhambra_overlap as alhov
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
alhov.flagging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))
    flag1 = U.zeros(ne1)
    flag2 = U.zeros(ne2)
    
    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # Matching on RA,Dec (not X,Y)
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to their S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           
           # The lower-S/N member of each matched pair is flagged (flag1/flag2 = 1)
           # in the original catalogs below.
           for hh1 in range(ne1):
               if id1[hh1] in idr1b:
                  flag1[hh1] = 1
                  
           for hh2 in range(ne2):
               if id2[hh2] in idr2b:
                  flag2[hh2] = 1

           # New catalogs are created with the flag appended as an extra column.
           outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
           outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.appendcol(cat1,flag1,'Flag2Detected',outcat1)
           ap.appendcol(cat2,flag2,'Flag2Detected',outcat2)

           # Renaming files
           ap.renamefile(cat1,cat1+'.old.cat')
           if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
           ap.renamefile(cat2,cat2+'.old.cat')
           if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)           
           
    else:
       print 'No common sources between the catalogs'
       # New catalogs are created with a null flag column appended.
       outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
       outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
       print 'outcat1',outcat1
       print 'outcat2',outcat2
       ap.appendcol(cat1,flag1*0,'Flag2Detected',outcat1)
       ap.appendcol(cat2,flag2*0,'Flag2Detected',outcat2)
       
       # Renaming files
       ap.renamefile(cat1,cat1+'.old.cat')
       if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
       ap.renamefile(cat2,cat2+'.old.cat')
       if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)   
Example No. 8
def purging_dobledetections(cat1,cat2):
    """

import alhambra_overlap
from alhambra_overlap import *
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
purging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))

    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # Matching on RA,Dec (not X,Y)
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to their S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           print ''
           print 'Number of detections to be removed from cat1: ', dim1rr
           print 'Number of detections to be removed from cat2: ', dim2rr
           print ''
           
           # Two new IDs (finalid1 & finalid2) are generated with 
           # the final elements to be included in the output catalog.
           finalid1 = U.zeros((ne1-dim1rr))
           finalid2 = U.zeros((ne2-dim2rr))
           kk1 = 0
           for hh1 in range(ne1):
               if id1[hh1] not in idr1b:
                  finalid1[kk1] = id1[hh1]
                  kk1 += 1
                  
           print 'kk1',kk1
           
           kk2 = 0       
           for hh2 in range(ne2):
               if id2[hh2] not in idr2b:
                  if kk2 <= (ne2-dim2rr-1): 
                     finalid2[kk2] = id2[hh2]
                     kk2+=1
                  
           print 'kk2',kk2       
                  
           # Two new, smaller catalogs are created without the double detections.
           outcat1 = ap.decapfile(cat1)+'.wo2detect.cat'
           outcat2 = ap.decapfile(cat2)+'.wo2detect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.select_rows_bylist(cat1,finalid1,outcat1)
           ap.select_rows_bylist(cat2,finalid2,outcat2)
           
           
    else:
       print 'No common sources between the catalogs'
Example No. 9
def globalimage_zb(image, cat, posx, posy, posarea, poszb, shape, save,
                   outfile):
    """
============
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/emss2137/emss2137.png'
cat = '/Volumes/amb2/SUBARU/emss2137/catalogs/MS2137_Subaru.bpz.2.cat'
posx = 3
posy = 4
posarea = 5
poszb = 17
shape = 'circle'
save = 'yes'
outfile = '/Users/albertomolino/Desktop/emss2137/emss2137_22.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
-----------------
import alhambra_photools
from alhambra_photools import *
import alhambra_webpage
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/macs1206/macs1206.color.png'
cat = '/Users/albertomolino/Desktop/UDF/Molino12/catalogs/ColorPro/macs1206_UDFconf_NIR_July2012_ISO_RS.cat'
posx = 3
posy = 4
shape = 'circle'
save = 'yes'
posarea = 5
poszb = 0
outfile = '/Users/albertomolino/Desktop/macs1206/macs1206.color.RS.purged.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
--------
import alhambra_photools
from alhambra_photools import *
import alhambra_webpage
from alhambra_webpage import *
image = '/Users/albertomolino/Desktop/rxj2248/HST/rxj2248_acs.png'
cat = '/Users/albertomolino/Desktop/rxj2248/HST/catalogs/rxj2248_IR_RedSeq.cat'
posx = 3
posy = 4
shape = 'circle'
save = 'yes'
posarea = 5
poszb = 0
outfile = '/Users/albertomolino/Desktop/rxj2248/HST/rxj2248_acs.RS.png'
globalimage_zb(image,cat,posx,posy,posarea,poszb,shape,save,outfile)
--------

    """
    colorfile = image
    im = Image.open(colorfile)
    imsize = nx, ny = im.size
    stamp = im.crop((0, 0, nx, ny))
    draw = ImageDraw.Draw(stamp)
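    # PIL images have their origin at the top-left corner, hence the vertical
    # flip (ny - yy) used when drawing at catalogue y-positions below.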

    try:
        x, y, area, zb = U.get_data(cat, (posx, posy, posarea, poszb))
        sf = U.get_data(cat, 74)
        # good = greater(zb,0.28) * less(zb,0.34)
        good = U.greater(zb, 0.001) * U.less(sf, 0.7)
        x, y, area, zb = U.multicompress(good, (x, y, area, zb))
    except:
        print 'Impossible to read the data from catalog. Check it out!!'

    for ii in range(len(x)):
        xx = x[ii]
        yy = y[ii]
        aa = area[ii]
        zzb = zb[ii]
        zbval = ' %.2f ' % (zzb)
        # print 'x,y',xx,yy
        # print 'zbval',zbval
        # print 'ii',ii
        # dx = dy = 1.5 * aa
        shapesize = 20  # aa * 1.05
        # print 'shapesize',shapesize
        # dxo = dyo = 0

        colores = (255, 255, 1)
        # if zzb < 0.1  : colores = (255,1,1)   # red
        # elif zzb >= 0.1 and zzb < 0.3 : colores = (255,255,1) # yellow
        # elif zzb >= 0.3 and zzb < 1.  : colores = (1,255,1)   # Green
        # elif zzb >= 1.  and zzb < 3.  : colores = (1,255,255) # Blue
        # else: colores = (255,1,255) # Purple

        if shape != 'None':
            if shape == 'circle':
                draw.ellipse((xx - shapesize, ny - yy - shapesize,
                              xx + shapesize, ny - yy + shapesize),
                             fill=None)
            elif shape == 'crosshair':
                draw.line((xx + shapesize, yy, xx + shapesize - 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx - shapesize, yy, xx - shapesize + 10, yy),
                          fill=None,
                          width=3)
                draw.line((xx, yy + shapesize, xx, yy + shapesize - 10),
                          fill=None,
                          width=3)
                draw.line((xx, yy - shapesize, xx, yy - shapesize + 10),
                          fill=None,
                          width=3)
            elif shape == 'rectangle':
                draw.rectangle((xx - shapesize, ny - yy - shapesize,
                                xx + shapesize, ny - yy + shapesize),
                               fill=colores)  # fill=None)
            else:
                print 'Shape not found! It will not be overlaid...'

        draw.text((xx - (shapesize / 2.), ny - yy - (shapesize / 2.)),
                  zbval,
                  fill=(255, 255, 1))
        # draw.text((xx-(1.5*shapesize),ny-yy-(2.*shapesize)),zbval,fill=(255,255,255))
        # # draw.text((xx-(1.5*shapesize),ny-yy-(2.*shapesize)),zbval,fill=colores)

    if save == 'yes':
        if outfile != 'None':
            stamp.save(outfile)
        else:
            stamp.save('imcutoff.png')