Example #1
def get_usefulcolumns(columns):
    """
    It extracts the vars, evars, posref, zpe and zpo information
    from a .columns file.
    vars & evars: magnitude & magnitude-error column positions inside the catalog.
====USAGE=====================================
columns = 'Abell383.columns'
vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)
----
    """
    filt = U.get_str(columns,0)
    nf = 0
    for ii in range(len(filt)):
        if filt[ii][-4:] == '.res': nf += 1
        if filt[ii] == 'M_0': 
           posM0 = ii

    print 'Number of filters detected... ', nf
    filtref = int(U.get_str(columns,1)[posM0])-1
    rawvars = U.get_str(columns,1,nf)
    vars = U.zeros(nf) 
    evars = U.zeros(nf)
    for jj in range(nf):
        vars[jj] = int(rawvars[jj].split(',')[0])-1   # -1: .columns positions are 1-indexed
        evars[jj] = int(rawvars[jj].split(',')[1])-1  # -1: .columns positions are 1-indexed
        if vars[jj] == filtref: posref = int(vars[jj])
    zpe,zpo = U.get_data(columns,(3,4),nf)
    vars = vars.astype(int)
    evars = evars.astype(int)
    return vars,evars,posref,zpe,zpo
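
# For reference, a .columns file as parsed above looks roughly like this
# (BPZ-style convention; filter names and numbers are illustrative only):
# one row per filter giving the 1-indexed "mag,emag" column positions
# (hence the -1 shifts above), the zero-point error and offset, plus an
# M_0 row pointing at the reference-magnitude column.
#
#   HST_ACS_WFC_F435W.res   12,13   AB   0.01   0.0
#   HST_ACS_WFC_F606W.res   14,15   AB   0.01   0.0
#   HST_ACS_WFC_F814W.res   16,17   AB   0.01   0.0
#   M_0   16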
Example #2
def replacing_nans_catalogs(catalog, newname):
    """

    vars = []
    evars = []
    data = C.loaddata(catalog)
    mags = data[:,vars]
    emags = data[:,evars]

    """

    vars = N.array([
        15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66,
        69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99, 102, 105, 108, 111
    ])
    evars = vars[:] + 1
    s2n_vars = vars[:] + 2

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = data[:, vars]
    em = data[:, evars]
    s2n = data[:, s2n_vars]

    nl = len(mm[:, 0])  # nl is the number of detections (rows) in the catalog.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    newmag = U.zeros((nl, nf), float)   # Where the cleaned magnitudes will be saved.
    errmag = U.zeros((nl, nf), float)   # Where the cleaned photometric errors will be saved.
    new_s2n = U.zeros((nl, nf), float)  # Where the cleaned S/N values will be saved.

    for jj in range(len(vars)):
        for ii in range(nl):
            if abs(mm[ii, jj]) > 60.:
                newmag[ii, jj] = -99.0
                errmag[ii, jj] = 0.00
                new_s2n[ii, jj] = -1
            elif s2n[ii, jj] < 0.00001:
                new_s2n[ii, jj] = 0.  # magnitude and error are left at 0 here
            else:
                newmag[ii, jj] = mm[ii, jj]
                errmag[ii, jj] = em[ii, jj]
                new_s2n[ii, jj] = s2n[ii, jj]

    # The cleaned values now overwrite the original data.
    data[:, vars] = newmag[:, U.arange(nf)]
    data[:, evars] = errmag[:, U.arange(nf)]
    data[:, s2n_vars] = new_s2n[:, U.arange(nf)]
    C.savedata(data, newname, dir="", header=head)  # Saving & creating a new catalog.
Example #3
def replace_photo_uncert(catalog, columns):
    """

    """
    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections (rows) in the catalog.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        maglim = B.get_limitingmagnitude(mm[:, jj], em[:, jj], 1., 0.25)
        print 'Limiting Magnitude for filter %s: %.3f' % (filters[jj], maglim)
        for ii in range(nl):
            if mm[ii, jj] == -99.:
                errmag[ii, jj] = 0.00
            elif mm[ii, jj] == 99.:
                errmag[ii, jj] = maglim
            else:
                errmag[ii, jj] = em[ii, jj]

    # The new magnitude errors now overwrite the original data.
    vars, evars, posref, zpe, zpo = get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    finalcatalog = catalog[:-3] + 'upp.cat'
    C.savedata(data, finalcatalog, dir="", header=head)  # Saving & creating a new catalog.
Example #4
def replace_kerttu_errmags(catalog, columns, finalcatalog):
    """

import alhambra_kerttu_fixerrmags as AFM
catalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.cat'
columns = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu.columns'
finalcatalog = '/Users/albertomolino/doctorado/articulos/ALHAMBRA/kerttu/test_photoz/kerttu3.cat'
AFM.replace_kerttu_errmags(catalog,columns,finalcatalog)
------

    """

    data = C.loaddata(catalog)  # Loading the whole catalog content.
    head = C.loadheader(catalog)  # Loading the original header.
    mm = A.get_magnitudes(catalog, columns)
    em = A.get_errmagnitudes(catalog, columns)
    filters = B.get_filter_list(columns)

    nl = len(mm[:, 0])  # nl is the number of detections (rows) in the catalog.
    nf = len(mm[0, :])  # nf is the number of bands inside the catalog.
    errmag = U.zeros((nl, nf), float)  # Where the new photometric errors will be saved.

    for jj in range(nf):
        for ii in range(nl):
            if mm[ii, jj] == -99.: errmag[ii, jj] = 0.00
            else: errmag[ii, jj] = em[ii, jj]

    # The new magnitude errors now overwrite the original data.
    vars, evars, posref, zpe, zpo = A.get_usefulcolumns(columns)
    data[:, evars] = errmag[:, U.arange(nf)]
    C.savedata(data, finalcatalog, dir="", header=head)  # Saving & creating a new catalog.
Example #5
def replace_upplimits(catalog, columns, finalcatalog):
    """
    It replaces the photometric uncertainty of undetected sources (m = 99)
    with the filter's limiting magnitude, saving the result as 'finalcatalog'.
    """
    data = C.loaddata(catalog)      # Loading the whole catalog content.
    head = C.loadheader(catalog)    # Loading the original header.
    m = get_magnitudes(catalog,columns)
    em = get_errmagnitudes(catalog,columns)
    filters = bpt.get_filter_list(columns)
    
    nl = len(m[:,0])    # nl is the number of detections inside every single band.
    nf = len(m[0,:])    # nf is the number of bands inside the catalog. 
    errmag = U.zeros((nl,nf),float)  # Where the new photo errors will be saved. 

    for jj in range(nf):
        # maglim = get_limitingmagnitude(m[:,jj],em[:,jj],3.,0.25)
        maglim = bpt.get_limitingmagnitude(m[:,jj],em[:,jj],1.,0.25)
        print 'Limiting Magnitude for filter %s: %.3f'%(filters[jj],maglim)
        for ii in range(nl):
            # print 'm[%i,%i]'%(ii,jj),m[ii,jj]
            if m[ii,jj] != 99. :         
               errmag[ii,jj] = em[ii,jj]    
            else:
               # print 'UNDETECTED OBJECT. SAVING ITS LIMITING MAGNITUDE !!'
               errmag[ii,jj] = maglim
    
    
    # The new magnitude errors now overwrite the original data.
    vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)
    data[:,evars] = errmag[:,U.arange(nf)]
    C.savedata(data,finalcatalog, dir="",header=head) # Saving & creating a new catalog.
Example #6
def inverse_flag_images(image):
    """
    It creates an inverse flag image: pixels with value < 1 in the
    input flag map are set to 1, and all others to 0.
    ----
import alhambra_invflagimas as AIV
image = '/Volumes/amb22/imagenes/f04/f04p01_F814W_1.swp.weight.flag.fits'
AIV.inverse_flag_images(image)

    """

    print 'Processing image: ', image
    wim = image
    data = P.open(wim)[0].data
    nc = len(data[0, :])
    nf = len(data[:, 0])
    matrix = U.zeros((nf, nc), float)

    for ii in range(nc):
        for jj in range(nf):
            if data[jj, ii] < 1:
                matrix[jj, ii] = 1
            else:
                matrix[jj, ii] = 0

    nameout = A.decapfile(wim) + '.inv.fits'
    print 'Saving new image as...', nameout
    P.writeto(nameout, matrix)

    try:
        A.addheader2another(image, nameout)
    except:
        print 'Impossible to update its header!!!'
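
# ALHAMBRA weight maps can also be turned into RMS-like images through
# RMS = 1 / sqrt(WeightMap). A minimal sketch, assuming astropy.io.fits
# in place of the old pyfits interface and a hypothetical output name:
import numpy as np
from astropy.io import fits

def weight_to_rms(weight_image, nameout):
    w = fits.getdata(weight_image).astype(float)
    rms = np.zeros_like(w)
    good = w > 0  # avoid dividing by zero where the weight is null
    rms[good] = 1.0 / np.sqrt(w[good])
    fits.writeto(nameout, rms, overwrite=True)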
Example #7
def stamping(imagein, coordx, coordy, size):
    """
    It cuts a size x size postage stamp centred on (coordx, coordy)
    from the input FITS image.
    """
    image = imagein
    xx = int(round(coordx))
    yy = int(round(coordy))
    data = pyfits.open(image)[0].data
    half = size // 2  # integer half-size; avoids float slice indices
    stamp = data[yy - half:yy + half, xx - half:xx + half]

    return stamp
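
# Usage sketch (path and coordinates hypothetical). Note that numpy clips
# slice bounds at the array edges, so stamps centred near a border come back
# smaller than size x size (or even empty once a lower bound goes negative):
#
#   stamp = stamping('/path/to/image.fits', 1204, 987, 50)
#   print stamp.shape   # (50, 50) away from the edges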
Example #8
def flagging_dobledetections_mergecolumns(catalog):
    """
    This serves to merge two duplicated columns of the input catalog into
    a single one, writing the result as '*.mergedcolumns.cat' and swapping
    the file names so that the merged catalog replaces the original.
--------
import alhambra_overlap as alhov
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_2_ISO.cat'
alhov.flagging_dobledetections_mergecolumns(cat2)    
    
    """
    
    data = coeio.loaddata(catalog)      # Loading the whole catalog content.
    head = coeio.loadheader(catalog)    # Loading the original header.
    nc = len(data.T)      # Number of columns.
    dim = len(data[:,0])  # Number of elements (rows).
    print 'nc,dim',nc,dim
    
    var1 = head[-3].split()[-1]
    var2 = head[-2].split()[-1]
    if var1 == var2:
       print 'Duplicated columns. Merging information...'
       uno = data[:,72]
       dos = data[:,73]
       tres = uno+dos
       newdata = U.zeros((dim,nc-1),float)
       for ii in range(nc-1):
           for jj in range(dim):
               if ii == nc-2:
                  # The last column of the merged catalog keeps the combined value.
                  newdata[jj,ii] = tres[jj]
               else:
                  newdata[jj,ii] = data[jj,ii]

       head2 = head[:-1]
       head2[-1]='#'
       outcat = catalog[:-4]+'.mergedcolumns.cat'
       coeio.savedata(newdata,outcat, dir="",header=head2)     # Saving and creating the new catalog.
                
       # Renaming files
       ap.renamefile(catalog,catalog+'.oldold.cat')
       if not os.path.exists(catalog): ap.renamefile(outcat,catalog)
Example #9
def remove_detections_bysegmmaps(field, pointing, ccd):
    """
    It uses the segmentation-maps to remove fake detections
    when masking out saturated stars.
----
import alhambra_fakedets as AF
AF.remove_detections_bysegmmaps(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    root2images = '/Volumes/amb22/imagenes/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    valor = U.zeros(dim)
    ima1 = root2images + 'f0%ip0%i_F814W_%i.swp.seg.fits' % (field, pointing,
                                                             ccd)
    ima2 = root2images + 'f0%ip0%i_F814W_%i.swp.segnomask.fits' % (
        field, pointing, ccd)
    segm1 = pyfits.open(ima1)[0].data
    segm2 = pyfits.open(ima2)[0].data
    for ii in range(dim):
        xo = int(round(x[ii]))
        yo = int(round(y[ii]))
        # Assumed criterion: a detection survives only if its centre pixel
        # is still labelled in the masked segmentation map.
        valor[ii] = segm1[yo, xo]

    # Defining the sample to be kept.
    good = U.greater(valor, 0)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="", header=head)  # Saving & creating a new catalog.
Example #10
def comparing_populations_HDF5(hdf5file):

    p = h5py.File(hdf5file, mode='r')
    pdz = p.get('FullProbability')
    # pdz = p.get('Likelihood')
    zz = p.get('redshift')[:]
    tt = p.get('type')[:]
    nz = len(zz)
    ng = N.shape(pdz)[0]

    probs = U.zeros((nz, 2), float)

    for ii in range(ng):
        pepe1 = U.sum(pdz[ii, :, 0:35], axis=1)
        pepe2 = U.sum(pdz[ii, :, 36:], axis=1)
        pepe3 = (U.sum(pepe1) + U.sum(pepe2)) * 1.
        # Normalized PDZs
        pdz1 = pepe1 / pepe3
        pdz2 = pepe2 / pepe3
        probs[:, 0] += pdz1
        probs[:, 1] += pdz2

    plt.plot(zz[::30], probs[::30, 0] * 275., 'r-',
             zz[::30], probs[::30, 1] * 275., 'b-', lw=5)
    plt.xlim(0., 1.5)

    # Second pass over the likelihood cube.
    # (Assumption: 'paca' refers to the 'Likelihood' dataset hinted at above.)
    paca = p.get('Likelihood')
    for ii in range(ng):
        pepe1 = U.sum(paca[ii, :, 0:35], axis=1)
        pepe2 = U.sum(paca[ii, :, 36:], axis=1)
        pepe3 = (U.sum(pepe1) + U.sum(pepe2)) * 1.
        # Normalized PDZs
        pdz1 = pepe1 / pepe3
        pdz2 = pepe2 / pepe3
        probs[:, 0] += pdz1
        probs[:, 1] += pdz2
Example #11
def remove_fakeabsorptions_F814W(field, pointing, ccd):
    """
    Using the rmsweight images, it gets rid of
    detections with imrs_F814W < 0.5.
    -------------------------------------------
import alhambra_fakedets as AF
AF.remove_fakeabsorptions_F814W(2,1,1)

    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.cat' % (field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    perc = U.zeros(dim)
    # Opening F814W Weight image
    ima = alh.alhambra_invrmsimagelist(field, pointing, ccd)[-1]
    datos = pyfits.open(ima)[0].data
    for ii in range(dim):
        if area[ii] > 1:
            size = int(round(U.sqrt(area[ii]) / 2.))
            xo = int(round(x[ii]))
            yo = int(round(y[ii]))
            stamp = datos[yo - size:yo + size, xo - size:xo + size]
            dimx = U.shape(stamp)[1]
            dimy = U.shape(stamp)[0]
            perc[ii] = stamp.sum() / (dimx * dimy * 1.)

    # Defining the sample to be kept.
    good = U.greater(perc, 0.5)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)  # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    coeio.savedata(data2, finalcat, dir="", header=head)  # Saving & creating a new catalog.
Example #12
def run_Mastercatalogue(framelist, finalname):
    """
    It generates the MASTER.FILTER HDF5-catalogue with all the individual-filter
    frames + the reference-filter image, according to the list yielded by 'runListing_cats'.
    ------
    list_calib_frames,master_filter_cat
    """
    # Make sure the frame list does exist.
    if not os.path.exists(framelist):
        print 'The file %s does not exist!' % (framelist)
        pausa = raw_input('Stop at run_Mastercatalogue!')

    # /Volumes/amb4/ALHAMBRA/images/individuals/globalist/f02p01c01.info.sort.txt
    # 1.Filter 2.Date 3.Time 4.EXPT 5.NAME 6.INDEX 7.Pmin 8.Phours 9.Pdays 10.Pyears
    filt, date, expt, perm, perh, perd, pery = U.get_data(
        framelist, (0, 1, 3, 6, 7, 8, 9))
    # Name of individual align+bias+ZPcal frames
    frame_names = U.get_str(framelist, 4)
    # Total number of frames to store.
    nframes = len(frame_names)

    # Initializing the HDF5-file.
    filtros = tb.Filters(complevel=5, complib="lzo")  # lzo is much faster than zlib
    fp_file = tb.openFile(finalname,
                          mode="w",
                          title="ALHAMBRA VARIAB MASTER CATALOGUE")
    # Defining and Extracting gral.info from F814W-detection image.
    ff = framelist.split('/')[-1][2]
    po = framelist.split('/')[-1][5]
    ccd = framelist.split('/')[-1][8]
    cat_f814image = root2cats + 'f0%s/' % (
        ff) + 'f0%sp0%s_F814W_%s.swp.cat' % (ff, po, ccd)
    if not os.path.exists(cat_f814image):
        print 'File %s does not exist! ' % (cat_f814image)
        pausa = raw_input('paused inside run_Mastercatalogue.')
    else:
        ra, dec, xx, yy, area, fwhm, aa, bb, flag = U.get_data(
            cat_f814image, (1, 2, 3, 4, 5, 6, 7, 8, 9))

    # Retrieving the original ALHAMBRA-ID for each detection.
    alhids = find_alhambraids(ra, dec)

    # Estimating the dimensions for the HDF5-file.
    ng = len(ra)  # number of galaxies.
    nf = nframes  # number of frames per galaxy.
    nc = 15  # number of variables to be stored (see below).

    # Including this information in the HDF5-file.
    ids_f814 = fp_file.createArray(fp_file.root, "ALHIDs", alhids)
    ra_f814 = fp_file.createArray(fp_file.root, "RA", ra)
    dec_f814 = fp_file.createArray(fp_file.root, "Dec", dec)
    x_f814 = fp_file.createArray(fp_file.root, "Xpos", xx)
    y_f814 = fp_file.createArray(fp_file.root, "Ypos", yy)
    area_f814 = fp_file.createArray(fp_file.root, "Area", area)
    fwhm_f814 = fp_file.createArray(fp_file.root, "FWHM", fwhm)
    a_f814 = fp_file.createArray(fp_file.root, "MajorAxis", aa)
    b_f814 = fp_file.createArray(fp_file.root, "MinorAxis", bb)
    Flag_f814 = fp_file.createArray(fp_file.root, "SExFlag", flag)
    # Defining dimension for the main matrix.
    full_table = fp_file.createCArray(fp_file.root,
                                      "AllData",
                                      tb.Float32Atom(),
                                      shape=(ng, nf, nc),
                                      chunkshape=(1, nf, nc),
                                      filters=filtros)

    # Start filling-in the matrix
    for sss in range(nframes):
        temporal_frame = frame_names[sss]
        # print 'Frame to be read: ',temporal_frame
        # f02p01_954_1.indiv.1.expt500s.20050928.bias.fits
        ff = temporal_frame.split('.')[0][2]  # Extracting the ALHAMBRA-Field
        frame = temporal_frame.split('.')[2]  # Extracting the Temporal-Frame
        temporal_frame_cat = root2cats + 'f0%s/' % (
            ff) + temporal_frame[:-4] + 'ZPcal.cat'
        # print 'temporal_frame_cat: ',temporal_frame_cat
        # f02p01_954_1.indiv.1.expt500s.20050928.bias.ZPcal.cat
        ima_filter_ref = root2images + 'f0%s/' % (ff) + temporal_frame.split(
            '.')[0] + '.swp.fits'
        # f02p01_954_1.swp.fits
        # print 'Assoc. Filter Image: ',ima_filter_ref
        cat_filter_ref = root2cats + 'f0%s/' % (ff) + A.getfilename(
            ima_filter_ref)[:-4] + 'cat'
        # f02p01_954_1.swp.cat
        # print 'Assoc. Filter Catalogue: ',cat_filter_ref
        # Reading information from Catalogues:
        # from Frame catalogue:
        fl, efl, m, em = U.get_data(temporal_frame_cat, (10, 11, 12, 13))
        S2N = (fl / efl)
        # from Reference Filter Catalogue.
        if sss < 1:
            current_cat = cat_filter_ref
            flr, eflr, mr, emr = U.get_data(cat_filter_ref, (10, 11, 12, 13))
        # This prevents re-reading the same reference catalogue for
        # individual frames sharing the same filter.
        if sss > 0 and cat_filter_ref != current_cat:
            current_cat = cat_filter_ref
            flr, eflr, mr, emr = U.get_data(cat_filter_ref, (10, 11, 12, 13))
        S2Nr = (flr / eflr)
        # Writing information into the HDF5-File.
        # Frame > Galaxies > Columns
        for iii in range(ng):
            values = U.zeros(nc)
            # 1.Frame 2.Mag 3.dMag 4.Flux 5.dFlux 6.MagRef 7.dMagRef
            # 8. FluxRef 9.dFluxRef 10.S2N 11.S2NRef 12.ExpTime
            # 13. TimeLapse['] 14.Date 15.Filter
            values[0] = frame  # 1.  Frame Number (1,2,3,...).
            values[1] = m[iii]  # 2.  ISO.Magnitude on indiv.frame.
            values[2] = em[iii]  # 3.  Unc.ISO.Magnitude on indiv.frame.
            values[3] = fl[iii]  # 4.  ISO.Flux on indiv.frame.
            values[4] = efl[iii]  # 5.  Unc.ISO.Flux on indiv.frame.
            values[5] = mr[iii]  # 6.  ISO.Magnitude on Ref.Filter.
            values[6] = emr[iii]  # 7.  Unc.ISO.Magnitude on Ref.Filter.
            values[7] = flr[iii]  # 8.  ISO.Flux on Ref.Filter.
            values[8] = eflr[iii]  # 9.  Unc.ISO.Flux on Ref.Filter.
            values[9] = S2N[iii]  # 10. signal-to-noise on indiv.frame.
            values[10] = S2Nr[iii]  # 11. signal-to-noise on Ref.Filter.
            values[11] = expt[sss]  # 12. Exposure-Time on indiv.frame.
            values[12] = perm[sss]  # 13. Time-Lapse from First Observation [min]
            values[13] = date[sss]  # 14. Date when indiv.frame was observed.
            values[14] = filt[sss]  # 15. Filter for indiv.frame

            full_table[iii, sss, :] = values[:]

    fp_file.close()

    master_cat = root2cats + 'f0%s/f0%sp0%sc0%s.hdf5' % (ff, ff, po, ccd)
    if not os.path.exists(master_cat):
        cmd5 = '/bin/mv %s %s ' % (finalname, master_cat)
        print cmd5
        os.system(cmd5)

    if os.path.exists(finalname):
        print '============================================================='
        print 'The HDF5-file %s was successfully created.' % (
            A.getfilename(finalname))
        print '============================================================='
        cmd6 = '/bin/rm -rf %s ' % (finalname)
        # print cmd6
        os.system(cmd6)
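
# Reading the master catalogue back is straightforward with PyTables; a
# minimal sketch (file name hypothetical; current PyTables spells the call
# tb.open_file, where older versions used tb.openFile as above). The column
# layout follows the per-frame loop, e.g. column 1 = ISO magnitude:
import tables as tb

fp = tb.open_file('f02p01c01.hdf5', mode='r')
alh_ids = fp.root.ALHIDs[:]           # ALHAMBRA IDs, one per detection
all_data = fp.root.AllData            # CArray of shape (ng, nf, nc)
mags_first_frame = all_data[:, 0, 1]  # ISO magnitudes on the first frame
fp.close()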
Example #13
def match_spz_sample(catalog1, catalog2): # TO CHECK

    finalcat1 = catalog2[:-3]+'CLASH.redu.cat'
    finalcat2 = catalog2[:-3]+'nada.cat'
    # if not os.path.exists(finalcat1):
    if not os.path.exists(finalcat2):
        # print 'Final catalog does not exist yet.'                           
        if os.path.exists(catalog1) and os.path.exists(catalog2):
            # It matches up detections to its Spectroscopic Sample.
            # Reading specz catalog
            print 'Reading info1 before matching...'
            speczsample = catalog1
            idsp,xsp,ysp = U.get_data(speczsample,(0,3,4))
            goodsp = U.greater_equal(xsp,1500) * U.less_equal(xsp,3500)
            goodsp *= U.greater_equal(ysp,1500) * U.less_equal(ysp,3500)
            idsp,xsp,ysp = U.multicompress(goodsp,(idsp,xsp,ysp))
            print 'New dimension for specz catalogue: ',len(xsp)
            # rasp,decsp,xsp,ysp,zsp = get_data(speczsample,(0,1,2,3,4))
            # xsp,ysp,zsp = get_data(speczsample,(1,2,7))
            ####### idsp = U.arange(len(xsp))+1 
            # idsp = arange(len(rasp))+1
            # Reading ColorPro catalog
            print 'Reading info2 before matching...'
            idcol,xcol,ycol = U.get_data(catalog2,(0,3,4))
            print 'Dimension for input catalogue before compressing: ',len(idcol)
            gsp = U.greater_equal(xcol,1500) * U.less_equal(xcol,3500)
            gsp *= U.greater_equal(ycol,1500) * U.less_equal(ycol,3500)
            idcol,xcol,ycol = U.multicompress(gsp,(idcol,xcol,ycol))
            print 'Dimension for input catalogue after compressing: ',len(idcol)
            # Using "matching_vects" to match up samples...
            print 'Matching samples....'
            pepe = CT.matching_vects(idcol,xcol,ycol,idsp,xsp,ysp,1.1)   # We now use X,Y instead of RA,Dec
            # Compressing matches for ColorPro...
            print 'Compressing matches...'
            matchidcol = pepe[:,0].astype(int)
            gdet_col = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
            matchidcol = U.compress(gdet_col,(matchidcol))
            # Compressing matches for Spectroscopic...
            matchidsp = pepe[:,1].astype(int)
            gdet_spz = U.greater(matchidsp,0)   # Excluding 0's (non matched detections)
            matchidsp = U.compress(gdet_spz,(matchidsp))
            print 'len(idcol)',len(idcol)
            print 'len(idsp)',len(idsp)
            if len(matchidcol) == len(matchidsp):
                print 'Creating idredu & zsredu '
                print 'Dimension of matchidsp ',len(matchidsp)
                idredu = U.zeros(len(matchidsp))
                idspredu = U.zeros(len(matchidsp))
                for ii in range(len(matchidsp)):
                    colindex = A.id2pos(idcol,matchidcol[ii]) # Position for Index idcol
                    spzindex = A.id2pos(idsp,matchidsp[ii])   # Position for Index idsp
                    idredu[ii] = idcol[colindex]  # ID for ColorPro
                    idspredu[ii] = idsp[spzindex]    # ID for Specz
                    
                # A new smaller catalog will be created containing specz info as an extra column.
                print 'Selecting by rows... ' 
                finalcat1 = catalog2[:-3]+'UDF.redu.cat'
                finalcat2 = catalog2[:-3]+'CLASH.redu.cat'
                U.put_data(catalog2[:-3]+'idsfrommatch.txt',(idredu,idspredu))
                A.select_rows_bylist_sorted(catalog1,idspredu,finalcat1)
                A.select_rows_bylist_sorted(catalog2,idredu,finalcat2)               
Example #14
def appending_ids2catalogues(field, pointing, ccd):
    """

import alhambra_3arcs as A3
A3.appending_ids2catalogues(2,1,1)


    """

    catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, field, pointing, ccd)
    idalh = U.get_str(catalhambra, 0)
    idalh2 = U.arange(len(idalh)) + 1
    xalh, yalh = U.get_data(catalhambra, (6, 7))

    cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % (
        field, field, pointing, ccd)
    id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4))
    print len(id3arcs)

    matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
    if not os.path.exists(matchfile):
        idcol = idalh2
        xcol = xalh
        ycol = yalh
        idsp = id3arcs
        xsp = x3arcs
        ysp = y3arcs

        pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5)

        # Compressing matches for ColorPro...
        print 'Compressing matches...'
        matchidcol = pepe[:, 0].astype(int)
        gdet_col = U.greater(matchidcol, 0)  # Excluding 0's (non-matched detections)
        matchidcol = U.compress(gdet_col, (matchidcol))
        # Compressing matches for Spectroscopic...
        matchidsp = pepe[:, 1].astype(int)
        gdet_spz = U.greater(matchidsp, 0)  # Excluding 0's (non-matched detections)
        matchidsp = U.compress(gdet_spz, (matchidsp))
        print 'len(idcol)', len(idcol)
        print 'len(idsp)', len(idsp)
        if len(matchidcol) == len(matchidsp):
            print 'Creating idredu & zsredu '
            print 'Dimension of matchidsp ', len(matchidsp)
            idredu = U.zeros(len(matchidsp))
            idspredu = U.zeros(len(matchidsp))
            for ii in range(len(matchidsp)):
                colindex = A.id2pos(idcol, matchidcol[ii])  # Position for Index idcol
                spzindex = A.id2pos(idsp, matchidsp[ii])    # Position for Index idsp
                idredu[ii] = idcol[colindex]   # ID for ColorPro
                idspredu[ii] = idsp[spzindex]  # ID for Specz

            matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
            U.put_data(matchfile, (idredu, idspredu))

    if os.path.exists(matchfile):
        pepa = open(matchfile[:-3] + 'bis.cat', 'w')
        idredu, idspredu = U.get_data(matchfile, (0, 1))
        i11 = idredu.astype(int) - 1
        i22 = idspredu.astype(int)
        lista = []
        for ii in range(len(i11)):
            lista.append(idalh[i11[ii]])
            pepa.write('%s  %s  \n' % (idalh[i11[ii]], i22[ii]))
        pepa.close()

        finalfinal = cat3arcs[:-3] + 'final.cat'
        if os.path.exists(finalfinal): A.deletefile(finalfinal)
        if not os.path.exists(finalfinal):
            print 'Preparing ', finalfinal
            idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0)
            append_IDs2_3arcs_catalogues(cat3arcs, idsa)
Example #15
def flagging_dobledetections(cat1,cat2):
    """
    This serves to append an extra column (to each of the two input catalogs)
    indicating whether a detection is repeated and has the lower S/N
    of the two.
    Sources flagged as 1 are those detections to be excluded when combining
    both catalogs into a single one.
--------
import alhambra_overlap as alhov
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
alhov.flagging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))
    flag1 = U.zeros(ne1)
    flag2 = U.zeros(ne2)
    
    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # Matching in RA,Dec (~1.1 arcsec tolerance)
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to their S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           
           # Detections listed in idr1b/idr2b are the lower-S/N duplicates
           # and get flagged with 1 in the output catalogs.
           for hh1 in range(ne1):
               if id1[hh1] in idr1b:
                  flag1[hh1] = 1
                  
           for hh2 in range(ne2):
               if id2[hh2] in idr2b:
                  flag2[hh2] = 1

           # New catalogs are created containing the flag as an extra column.
           outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
           outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.appendcol(cat1,flag1,'Flag2Detected',outcat1)
           ap.appendcol(cat2,flag2,'Flag2Detected',outcat2)

           # Renaming files
           ap.renamefile(cat1,cat1+'.old.cat')
           if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
           ap.renamefile(cat2,cat2+'.old.cat')
           if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)           
           
    else:
       print 'No common sources between the catalogs'
       # New catalogs are created containing the (null) flag as an extra column.
       outcat1 = ap.decapfile(cat1)+'.doubledetect.cat'
       outcat2 = ap.decapfile(cat2)+'.doubledetect.cat'
       print 'outcat1',outcat1
       print 'outcat2',outcat2
       ap.appendcol(cat1,flag1*0,'Flag2Detected',outcat1)
       ap.appendcol(cat2,flag2*0,'Flag2Detected',outcat2)
       
       # Renaming files
       ap.renamefile(cat1,cat1+'.old.cat')
       if not os.path.exists(cat1): ap.renamefile(outcat1,cat1)
       ap.renamefile(cat2,cat2+'.old.cat')
       if not os.path.exists(cat2): ap.renamefile(outcat2,cat2)   
Example #16
def matching_vects_ddet(c10,c11,c12,c20,c21,c22,precision):
    """
  -------------------------------------------------------------------------
  The program matches objects using their coordinates.
  -------------------------------------------------------------------------
  c10: identification number from set1 
  c11 & c12: coordinates to be used when matching the common objects.
  c20: identification number from set2.
  c21 & c22: coordinates to be used when matching the common objects.
  An output matrix (outmatrix) will provide the matched elements.
  outmatrix[0]: c10_matched & outmatrix[1]: c20_matched    
  --------------------------------------------------------------------------
      Alberto Molino amb.at.iaa.es // July_09 //
  --------------------------------------------------------------------------

    """
    # Variable definition.

    id1 = c10
    x1 = c11
    y1 = c12
    dim1 = len(c10)

    id2 = c20
    x2 = c21
    y2 = c22
    dim2 = len(c20)

    delta_xx = U.zeros(dim1 + dim2, float)
    delta_yy = U.zeros(dim1 + dim2, float)
    INSIDE = U.zeros((dim1 + dim2, 2), float)
    MATCHING = U.zeros((dim1 + dim2, 2), float)

    kmin = 0
    nn = 0

    prec = float(precision)

    for jj in range(dim1):
        kk = 0

        for ii in range(dim2):
            delta_xx[ii] = (x2[ii] - x1[jj])
            delta_yy[ii] = (y2[ii] - y1[jj])
            circle = U.sqrt(delta_xx[ii] ** 2 + delta_yy[ii] ** 2)

            if circle < prec:
                INSIDE[kk, 0] = float(circle)
                INSIDE[kk, 1] = id2[ii]
                kk += 1

        kmin = 0

        if kk > 0:
            if kk > 1:
                # Several candidates inside the radius: keep the closest one.
                kmin = U.argmin(INSIDE[0:kk, 0])
                MATCHING[nn, 0] = id1[jj]
                MATCHING[nn, 1] = INSIDE[kmin, 1]  # id2
            else:
                MATCHING[nn, 0] = id1[jj]
                MATCHING[nn, 1] = INSIDE[0, 1]  # id2

            nn += 1  # Real dimension of matched objects

    print '-------------------------------------'
    print ' Matched elements = ', nn
    if nn > 0:
        print MATCHING[0:nn, 0]
        print MATCHING[0:nn, 1]

    return MATCHING
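
# matching_vects_ddet above is an O(dim1 x dim2) double loop; with numpy the
# same closest-neighbour-within-radius match can be sketched via broadcasting.
# Same inputs are assumed; unlike the original, the output keeps one row per
# set1 object (zeros where unmatched) instead of packing matches first, and
# memory grows as dim1 * dim2:
import numpy as np

def matching_vects_ddet_fast(c10, c11, c12, c20, c21, c22, precision):
    c11, c12 = np.asarray(c11), np.asarray(c12)
    c21, c22 = np.asarray(c21), np.asarray(c22)
    # Pairwise squared distances, shape (dim1, dim2).
    d2 = (c21[None, :] - c11[:, None]) ** 2 + (c22[None, :] - c12[:, None]) ** 2
    nearest = np.argmin(d2, axis=1)  # closest set2 object for every set1 object
    ok = np.sqrt(d2[np.arange(len(c10)), nearest]) < float(precision)
    out = np.zeros((len(c10), 2))
    out[ok, 0] = np.asarray(c10)[ok]
    out[ok, 1] = np.asarray(c20)[nearest[ok]]
    return out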
Example #17
def purging_dobledetections(cat1,cat2):
    """

import alhambra_overlap
from alhambra_overlap import *
cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
purging_dobledetections(cat1,cat2)    
    
    """
    
    id1,ra1,dec1,x1,y1,s2n1 = U.get_data(cat1,(0,1,2,3,4,14))
    id2,ra2,dec2,x2,y2,s2n2 = U.get_data(cat2,(0,1,2,3,4,14))
    ne1 = len(id1)
    ne2 = len(id2)
    g1 = U.greater_equal(ra1,min(ra2))
    g2 = U.less_equal(ra2,max(ra1))
    id1r,ra1r,dec1r,x1r,y1r,s2n1r = U.multicompress(g1,(id1,ra1,dec1,x1,y1,s2n1))
    id2r,ra2r,dec2r,x2r,y2r,s2n2r = U.multicompress(g2,(id2,ra2,dec2,x2,y2,s2n2))

    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2',dim1,dim2
    if dim1>0 and dim2>0:
       print 'Matching samples....'
       pepe = matching_vects_ddet(id1r,ra1r,dec1r,id2r,ra2r,dec2r,0.000312)   # Matching in RA,Dec (~1.1 arcsec tolerance)
       # Purging null elements
       matchidcol = pepe[:,0].astype(int)
       good_det1 = U.greater(matchidcol,0)  # Excluding 0's (non matched detections)
       matchidcol = U.compress(good_det1,(matchidcol))
       matchidsp = pepe[:,1].astype(int)
       good_det2 = U.greater(matchidsp,0) # Excluding 0's (non matched detections)
       matchidsp = U.compress(good_det2,(matchidsp))
       if len(matchidcol) == len(matchidsp) and len(matchidcol) >0 :
           newdim = len(matchidsp)
           print 'Dimension of matching',newdim
           idr1  = U.zeros(newdim)
           idr2  = U.zeros(newdim)
           s2nr1 = U.zeros(newdim)
           s2nr2 = U.zeros(newdim)
           for ii in range(newdim):
               idr1index = ap.id2pos(id1r,matchidcol[ii]) 
               idr2index = ap.id2pos(id2r,matchidsp[ii]) 
               idr1[ii]  = id1r[idr1index]
               s2nr1[ii] = s2n1r[idr1index]               
               idr2[ii]  = id2r[idr2index] 
               s2nr2[ii] = s2n2r[idr2index]
               
           # Select/Purge detections according to their S/N
           marcador1 = U.zeros(newdim)
           marcador2 = U.zeros(newdim)
           for ss in range(newdim):
               cociente = s2nr1[ss]/s2nr2[ss]  
               if cociente >= 1.: marcador1[ss] = 1.
               else: marcador2[ss] = 1.     
                   
           cond1 = U.less(marcador1,1)
           cond2 = U.less(marcador2,1)
           idr1b = U.compress(cond1,idr1)
           dim1rr = len(idr1b)
           idr2b = U.compress(cond2,idr2)
           dim2rr = len(idr2b)
           print ''
           print 'Number of detections to be removed from cat1: ', dim1rr
           print 'Number of detections to be removed from cat2: ', dim2rr
           print ''
           
           # Two new IDs (finalid1 & finalid2) are generated with 
           # the final elements to be included in the output catalog.
           finalid1 = U.zeros((ne1-dim1rr))
           finalid2 = U.zeros((ne2-dim2rr))
           kk1 = 0
           for hh1 in range(ne1):
               if id1[hh1] not in idr1b:
                  finalid1[kk1] = id1[hh1]
                  kk1 += 1
                  
           print 'kk1',kk1
           
           kk2 = 0       
           for hh2 in range(ne2):
               if id2[hh2] not in idr2b:
                  if kk2 <= (ne2-dim2rr-1): 
                     finalid2[kk2] = id2[hh2]
                     kk2+=1
                  
           print 'kk2',kk2       
                  
           # New, smaller catalogs are created without the duplicated detections.
           outcat1 = ap.decapfile(cat1)+'.wo2detect.cat'
           outcat2 = ap.decapfile(cat2)+'.wo2detect.cat'
           print 'outcat1',outcat1
           print 'outcat2',outcat2
           ap.select_rows_bylist(cat1,finalid1,outcat1)
           ap.select_rows_bylist(cat2,finalid2,outcat2)
           
           
    else:
       print 'No common sources between the catalogs'
Example #18
def sorting_globalists(globalist):  # hypothetical signature, assumed from the body
    """
    It writes a date/time-sorted 'info.sort.txt' companion file for every
    frame list in 'globalist' (see also Example #19).
    """
    nl = len(globalist)
    for ii in range(nl):
        infofile_name = globalist[ii][:-4] + 'info.txt'
        fi, da, ti, ex, ix = U.get_data(infofile_name, (0, 1, 2, 3, 5))
        im, ti2 = U.get_str(infofile_name, (4, 2))
        ni = len(im)

        infofile_name_sorted = globalist[ii][:-4] + 'info.sort.txt'
        print 'Creating ', infofile_name_sorted
        infofile = open(infofile_name_sorted, 'w')
        header = '#  FILTER  DATE  TIME  EXPTIME  NAME  INDEX   PERIOD [min]   PERIOD [hours]   PERIOD [days]     PERIOD [years] \n'
        infofile.write(header)

        # Sorting frames by concatenated date+time stamps.
        newdate = U.zeros(ni, 'int')
        for jj in range(ni):  # 'jj' so the outer loop index is not shadowed
            uno = int(da[jj])
            dos = ti2[jj]
            ll = '%s%s' % (uno, dos)
            newdate[jj] = float(ll)

        newdatasor = U.sort(newdate)
        fir, dar, tir, exr, imr, ixr, ti2r = U.multisort(
            newdate, (fi, da, ti, ex, im, ix, ti2))

        period = U.zeros(ni, 'int')
Example #19
def run_getframelist(field, pointing, ccd):
    """
    It creates the bias-free image list for a given FPC.
    
    """

    ff = field
    po = pointing
    refF814ima = root2f814 + 'f0%s/f0%sp0%s_F814W_%s.swp.fits' % (ff, ff, po,
                                                                  ccd)
    if os.path.exists(refF814ima):
        listaname = root2lists + 'f0%sp0%sc0%s.list' % (ff, po, ccd)
        imas = root2alig + 'f0%s/f0%sp0%s_*_%s.indiv.*.fits' % (ff, ff, po,
                                                                ccd)
        cmd = 'ls %s > %s' % (imas, listaname)
        print cmd
        os.system(cmd)

    if os.path.exists(listaname):
        globalist = U.get_str(listaname, 0)
        ni = len(globalist)
        infofile_name = listaname[:-4] + 'info.txt'
        infofile = open(infofile_name, 'w')
        header = '#  FILTER  DATE  TIME   EXPTIME  NAME  INDEX   \n'
        infofile.write(header)

        for ss in range(ni):
            nick = globalist[ss].split('/')[-1]
            ff = nick[2:3]
            po = nick[5:6]
            ccd = nick[11:12]
            filt = nick[7:10]
            date = nick.split('.')[4]
            expt = nick.split('.')[3][4:][:-1]
            try:
                head = fits.getheader(globalist[ss])
                # head = pyfits.open(globalist[ss])[0].header
                hours = head['DATE-OBS'].split('T')[1].split(':')[0]
                minut = head['DATE-OBS'].split('T')[1].split(':')[1]
                secs = head['DATE-OBS'].split('T')[1].split(':')[2]
                reloj = hours + minut + secs

                print 'image: ', nick
                linea = '%s   %s   %s   %s   %s   %i  \n' % (
                    filt, date, reloj, expt, nick, ss + 1)
                infofile.write(linea)
            except:
                continue
        infofile.close()

    if os.path.exists(infofile_name):
        fi, da, ti, ex, ix = U.get_data(infofile_name, (0, 1, 2, 3, 5))
        im, ti2 = U.get_str(infofile_name, (4, 2))
        ni = len(im)
        infofile_name_sorted = infofile_name[:-4] + '.sort.txt'
        print 'Creating ', infofile_name_sorted
        infofile = open(infofile_name_sorted, 'w')
        header = '#  FILTER  DATE  TIME  EXPTIME  NAME  INDEX   PERIOD [min]  PERIOD [hours]  PERIOD [days]  PERIOD [years] \n'
        infofile.write(header)

        # Sorting frames by date
        newdate = U.ones(ni, dtype='int')
        for ii in range(ni):
            uno = int(da[ii])
            dos = ti2[ii]
            ll = '%s%s' % (uno, dos)
            newdate[ii] = float(ll)
        newdatasor = U.sort(newdate)
        fir, dar, tir, exr, imr, ixr, ti2r = U.multisort(
            newdate, (fi, da, ti, ex, im, ix, ti2))

        # Estimating Time-Lapses ('period') among observations.
        period = U.zeros(ni, dtype='float64')
        # period = U.ones(ni, dtype='int')
        for ii in range(ni - 1):
            period[ii + 1] = A.get_observ_freq(int(dar[0]), ti2r[0],
                                               int(dar[ii + 1]), ti2r[ii + 1])

        for ss in range(ni):
            linea = '%i   %i   %s   %i   %s   %i   %i   %i   %i    %i  \n' % (
                fir[ss], dar[ss], ti2r[ss], exr[ss], imr[ss], ixr[ss],
                period[ss] / 60., period[ss] / 3600., period[ss] / 86400.,
                period[ss] / 31536000.)
            infofile.write(linea)
        infofile.close()
        print ' '
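
# A.get_observ_freq is not shown in these examples; a minimal stand-in built
# on the standard library, assuming dates like 20050928 and times given as
# zero-padded HHMMSS strings with integer seconds (the name and the return
# unit in seconds are assumptions):
from datetime import datetime

def observ_lapse_seconds(date1, time1, date2, time2):
    t1 = datetime.strptime('%s%s' % (date1, time1), '%Y%m%d%H%M%S')
    t2 = datetime.strptime('%s%s' % (date2, time2), '%Y%m%d%H%M%S')
    return (t2 - t1).total_seconds()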
Example #20
def singlemosaico(images, coox, cooy, size, nt, nx, ny, imageout):
    """
   ================================================================
   It generates a single mosaic-like image according to the
   input image and the coordinates (X,Y)
   ----------
   images: image (or list of images, one per stamp) to be used.
   coox: 1D Integer vector with Axis-X coordinates [pixel]
   cooy: 1D Integer vector with Axis-Y coordinates [pixel]
   size: size for individual internal substamps  
   nt:   number of total substamps (numb. objects)
   nx:   number of horizontal stamps
   ny:   number of vertical stamps
   imageout: final name for the mosaic image
   ================================================================   
   USAGE:

----------
import mosaicing
from mosaicing import *
image = '/Volumes/amb2/SUBARU/CLJ1226/images/clj1226_Z_2003_20130227_sw.fits'
size = 25
nt = 418
nx=21
ny=20
coox,cooy = U.get_data('/Volumes/amb2/SUBARU/CLJ1226/catalogs/stars.cat',(0,1))
imageout = '/Volumes/amb2/SUBARU/CLJ1226/images/clj1226_Z_2003_20130227_sw.mosaic.fits'
singlemosaico(image,coox,cooy,size,nt,nx,ny,imageout)

   =================================================================
   """

    # Definition of variables.
    mad = size  # Size of every sub-squared-stamp. 100 is a good choice!
    mad2 = mad + 1
    albumdata = U.zeros((ny * mad2 + 1, nx * mad2 + 1), float)
    # coox = coox.astype(int)
    # cooy = cooy.astype(int)

    print ' --------------------------------------------------------------------------- '
    print ' A stamp size = %d x %d has been chosen ' % (mad, mad)
    print ' Each galaxy will be displayed in a %d x %d album-like image' % (nx, ny)
    print ' --------------------------------------------------------------------------- '

    for i in range(nt):
        ix = i % nx
        iy = ny - (i // nx) - 1

        # It picks the ith-submatrix from the ith image,
        # creating every single sub-mosaic.
        if nt == 1:
            stamp = alhmos.stamping(images, coox, cooy, mad)
        else:
            stamp = alhmos.stamping(images[i], coox[i], cooy[i], mad)
        ax = ix * mad2 + 1
        ay = iy * mad2 + 1

        # Saving the ith-submosaic in albumdata.
        albumdata[ay:ay + mad, ax:ax + mad] = stamp.astype(float)
        print ' Copying submosaic %i of %i ' % (i + 1, nt)

    # Creating the new mosaic as a fits file.
    pyfits.writeto(imageout, albumdata, clobber=True)
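
# The caller must guarantee nx * ny >= nt, or the loop above indexes past the
# album. A square-ish grid can be derived from nt alone; a small sketch
# (helper name hypothetical) that reproduces the nx=21, ny=20 choice in the
# docstring usage for nt=418:
import math

def mosaic_grid(nt):
    nx = int(math.ceil(math.sqrt(nt)))
    ny = int(math.ceil(nt / float(nx)))
    return nx, ny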