def get_master_photoBPZcat(catlist, bpzlist):
    """
    From a list of photometric catalogues and their BPZ files,
    it creates a master cat+bpz catalogue.
    ----
    :param catlist: list of photometric catalogues
    :param bpzlist: list of BPZ catalogues
    :return: none
    """
    master_list = os.path.join(os.path.dirname(catlist), 'master.BPZ.list')
    master_name = os.path.join(os.path.dirname(catlist), 'master.BPZ.cat')
    filename = open(master_list, 'w')  # header
    cats = U.get_str(catlist, 0)
    bpzs = U.get_str(bpzlist, 0)
    nc = len(cats)
    nb = len(bpzs)
    if nc != nb:
        print 'Dimensions mismatch!'
        sys.exit()
    for ii in range(nc):
        cat = cats[ii]
        bpz = bpzs[ii]  # fixed: the bpz list was read but never used.
        out = cats[ii][:-3] + 'BPZ.cat'  # strip the 'cat' extension first.
        A.appendColorproBpz(cat, bpz, out)
        if os.path.exists(out):
            filename.write(out + ' \n')
    filename.close()
    # Compile the master catalogue from the list just written
    # (the original passed master_name twice).
    A.appendlistcatalog(master_list, master_name)
def get_usefulcolumns(columns):
    """
    It extracts the vars, evars, posref, zpe & zpo information from a
    .columns file, where vars & evars are the positions of the magnitudes
    and their uncertainties inside the catalogue.
    ====USAGE=====================================
    columns = 'Abell383.columns'
    vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)
    ----
    """
    filt = U.get_str(columns, 0)
    nf = 0
    posM0 = -1  # position of the 'M_0' row (assumed present in the file).
    for ii in range(len(filt)):
        if filt[ii][-4:] == '.res':
            nf += 1
        if filt[ii] == 'M_0':
            posM0 = ii
    print 'Number of filters detected... ', nf
    filtref = int(U.get_str(columns, 1)[posM0]) - 1
    rawvars = U.get_str(columns, 1, nf)
    vars = U.zeros(nf)
    evars = U.zeros(nf)
    posref = 0  # fallback in case the reference filter is not matched below.
    for jj in range(nf):
        vars[jj] = int(rawvars[jj].split(',')[0]) - 1   # -1: .columns is 1-indexed
        evars[jj] = int(rawvars[jj].split(',')[1]) - 1  # -1: .columns is 1-indexed
        if vars[jj] == filtref:
            posref = int(vars[jj])
    zpe, zpo = U.get_data(columns, (3, 4), nf)
    vars = vars.astype(int)
    evars = evars.astype(int)
    return vars, evars, posref, zpe, zpo
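# ---------------------------------------------------------------------------
# Hedged sketch (not part of the pipeline): a self-contained illustration of
# the .columns layout get_usefulcolumns() expects. Filter rows end in '.res'
# and carry 'mag_col,emag_col' pairs (1-indexed); an 'M_0' row points at the
# reference-magnitude column. File contents below are invented.
def _demo_parse_columns():
    demo = ['F814W.res 12,13', 'g_SDSS.res 14,15', 'M_0 12']
    nf, posM0 = 0, -1
    for ii in range(len(demo)):
        name = demo[ii].split()[0]
        if name[-4:] == '.res':
            nf += 1
        if name == 'M_0':
            posM0 = ii
    filtref = int(demo[posM0].split()[1]) - 1  # 0-indexed reference column
    print 'filters:', nf, ' reference column:', filtref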
def building_alhambraPSFmosaico(listmosaics, listPSFnames, size):
    """
    It builds one PSF-model per filter by stacking, for every position in the
    album-like mosaics, the corresponding (positive, normalized) stamps from
    all the input mosaics.
    ------
    listmosaics = '/Volumes/amb22/imagenes/f01/PSFmosaics/F01.mosaics.list'
    listPSFnames = '/Volumes/amb22/imagenes/f01/F01P01C01.PSFs.list'
    size=25
    building_alhambraPSFmosaico(listmosaics,listPSFnames,size)
    """
    # It creates the list of images to be used (from "listing.py").
    psflist = U.get_str(listPSFnames, 0)
    mosaiclist = U.get_str(listmosaics, 0)
    # STARTING WITH THE PROCESS...
    # Grid geometry of the album-like mosaics.
    # ===================================================================================
    nx = 6      # Number of objects along the x-axis.
    ny = 5      # Number of objects along the y-axis.
    mad = size  # Size of every squared sub-stamp. 100 is a good choice!
    mad2 = mad + 1
    nf = len(psflist)
    nm = len(mosaiclist)
    for ss in range(nf):
        print 'Creating PSF-model for %s...' % (psflist[ss])
        print ' '
        temporal = N.zeros((nm, size, size), float)
        model = N.zeros((size, size), float)
        ix = ss % nx
        iy = ny - (ss / nx) - 1  # integer division (Python 2)
        ax = ix * mad2 + 1
        ay = iy * mad2 + 1
        for ii in range(nm):
            mosaico = mosaiclist[ii]
            albumdata = fits.open(mosaico)[0].data
            temporal[ii] = albumdata[ay:ay + mad, ax:ax + mad]
            # Clipping negative pixels before normalizing each stamp.
            for jj in range(mad):
                for gg in range(mad):
                    if temporal[ii, jj, gg] < 0.:
                        temporal[ii, jj, gg] = 0.
            model += temporal[ii] / temporal[ii].sum()
        # Creating the new PSF-model as a FITS file.
        fits.writeto(psflist[ss], model)
def run():
    cs = U.get_str(lista_fluxcomp, 0)
    nc = len(cs)
    cat = U.get_str(lista_catalogs, 0)
    ncat = len(cat)
    print 'Number of catalogues to convert: ', nc
    cols = U.get_str(lista_columns, 0)
    ncols = len(cols)
    print 'nc,ncat,ncols: ', nc, ncat, ncols
    for ii in range(nc):
        head = coeio.loadheader(cs[ii])
        nh = len(head)
        print 'Number of variables: ', nh
        nf = nh - 5
        body = coeio.loaddata(cs[ii])
        ng = len(body)
        field = ((cat[ii].split('/')[-1]).split('.')[1])[2]
        pointing = ((cat[ii].split('/')[-1]).split('.')[1])[5]
        ccd = ((cat[ii].split('/')[-1]).split('.')[1])[8]
        print 'Reading Field: %s, Pointing: %s, CCD: %s' % (field, pointing, ccd)
        filename = final_root + cat[ii].split('/')[-1][:-3] + 'flux_comparison'
        print 'filename', filename
        filename2 = final_root + cat[ii].split('/')[-1][:-3] + 'columns'
        A.copyfile(cols[ii], filename2)
        outfile = open(filename, 'w')
        # 'hh' (not 'ii') so the outer loop index is not clobbered.
        for hh in range(len(head)):
            outfile.write('%s \n' % (head[hh]))
        for ss in range(ng):
            ids = body[ss, 0]
            ids814 = convert814ids(ids, int(field), int(pointing), int(ccd))
            for jj in range(nh):
                if jj == 0:
                    outfile.write('%s ' % (ids814))
                elif 1 <= jj <= 4:
                    outfile.write('%.2f ' % (body[ss, jj]))
                else:
                    outfile.write('%s ' % (body[ss, jj]))
            outfile.write('\n')
        outfile.close()
def get_SExt_assoc_files(pepe): """ It creates the associated catalogues with the detections to be included in the analysis. """ for ii in range(7): for jj in range(4): for kk in range(4): cat = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % ( ii + 2, ii + 2, jj + 1, kk + 1) if os.path.exists(cat): ids = U.get_str(cat, 0) x, y, ar, ra, dec, mm = U.get_data(cat, (6, 7, 8, 4, 5, 65)) nameout = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.coo' % ( ii + 2, ii + 2, jj + 1, kk + 1) good = U.less_equal(abs(mm), 23.0) ids = U.compress(good, (ids)) x, y, ar, ra, dec, mm = U.multicompress( good, (x, y, ar, ra, dec, mm)) ne = len(x) fileout = open(nameout, 'w') fileout.write('# X Y AREA ID RA DEC F814W \n') print 'Analyzing ', cat for ss in range(ne): linea = '%.3f %.3f %i %s %f %f %.2f \n' % ( x[ss], y[ss], ar[ss], ids[ss], ra[ss], dec[ss], mm[ss]) fileout.write(linea) fileout.close()
def combine_alhambraPSFmodels(listPSFs, finalPSFmodel, norm=1):
    """
    ================================================================
    It combines a list of PSFs to derive a normalized final model.
    ================================================================
    ------
    listPSFs = '/Users/albertomolino/Desktop/rxj2248/HST/stars.list'
    finalPSFmodel = '/Users/albertomolino/Desktop/rxj2248/HST/rxj2248_mosaic_065mas_wfc3ir_total_drz_20121105.psfmodel.fits'
    combine_alhambraPSFmodels(listPSFs,finalPSFmodel,1)
    """
    psfs = U.get_str(listPSFs, 0)
    n_psfs = len(psfs)  # renamed from 'np' to avoid clashing with the usual numpy alias.
    data = fits.open(psfs[0])[0].data
    data2 = data * 1.
    nc = N.shape(data)[1]
    nf = N.shape(data)[0]
    print 'nc,nf:', nc, nf
    for ii in range(n_psfs - 1):
        datos = fits.open(psfs[ii + 1])[0].data
        print 'Reading file...', psfs[ii + 1]
        nc = N.shape(datos)[1]
        nf = N.shape(datos)[0]
        print 'nc,nf:', nc, nf
        data2 += datos
    if norm == 1:
        data2 = data2 / data2.sum()
    fits.writeto(finalPSFmodel, data2)
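# Hedged sketch (synthetic stamps, plain numpy): the stack-and-normalize
# logic of combine_alhambraPSFmodels(), minus the FITS I/O. 'N' is the
# numpy alias used across this module.
def _demo_stack_psfs():
    stamps = [N.random.rand(25, 25) for kk in range(5)]  # fake PSF stamps
    stack = N.zeros((25, 25), float)
    for stamp in stamps:
        stack += stamp
    stack = stack / stack.sum()  # the norm=1 branch
    print 'PSF integral after normalization: %.6f' % (stack.sum())  # ~1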
def get_list_detect_wimas(cluster):
    """
    It compiles the list of (r,i,z) WEIGHT-images used to build the
    detection image. More info may be needed here to separate
    weight- from drz-images.
    """
    root2images = finalroot + 'images/%s/' % (cluster)
    # R-band
    cmd1 = '/bin/ls %s*rSDSS*weight*fits ' % (root2images)
    cmd1 += '> %sdetwima.riz.temp' % (root2images)
    os.system(cmd1)
    # I-band
    cmd2 = '/bin/ls %s*iSDSS*weight*fits ' % (root2images)
    cmd2 += '>> %sdetwima.riz.temp' % (root2images)
    os.system(cmd2)
    # z-band
    cmd3 = '/bin/ls %s*zSDSS*weight*fits ' % (root2images)
    cmd3 += '>> %sdetwima.riz.temp' % (root2images)
    os.system(cmd3)
    # Reading final list
    finalist = '%sdetwima.riz.temp' % (root2images)
    if not os.path.exists(finalist):
        print 'List of detection images not created!'
        sys.exit()
    else:
        wimas = U.get_str(finalist, 0)
    return wimas
def arrange_alhambraHDF5list_byfield(lista, save='yes'):
    """
    It stacks the individual P(z) distributions from a list of files
    into a single, global P(z).
    ---------------------------------
    import alhambrahdf5
    from alhambrahdf5 import *
    mat = arrange_alhambraHDF5list_byfield(lista)
    """
    ims = U.get_str(lista, 0)
    basez = U.arange(0.001, 7.001, 0.001)
    dim = len(ims)
    for ii in range(dim):
        print '%i/%i' % (ii + 1, dim)
        infile = ims[ii]
        data = U.get_data(infile, 0)
        if ii < 1:
            datos = data
        else:
            datos += data
    if save == 'yes':
        # 'decapfile' is assumed to be available at module level
        # (elsewhere in these modules it is called as A.decapfile).
        finaldata = decapfile(lista) + '.global.mat'
        U.put_data(finaldata, (datos, basez))
    return datos
def find_alhambraids(ra1, dec1): """ It looks for the ALHAMBRA-IDs given a set of coordinates (RA,Dec). ----------------------------------------- import alhambra_variability_tools as AVT ra,dec = U.get_data(cat,(1,2)) ids = AVT.find_alhambraids(ra,dec) ----------------------------------------- """ ra, dec = U.get_data( '/Volumes/amb4/ALHAMBRA/catalogos/reduction_v4f/global/alhambra.coo.cat', (1, 2)) ids = U.get_str( '/Volumes/amb4/ALHAMBRA/catalogos/reduction_v4f/global/alhambra.coo.cat', 0) nele = len(ra1) idd = U.ones(nele, dtype='int') for iii in range(nele): dra = abs(ra - ra1[iii]) ddec = abs(dec - dec1[iii]) val = dra + ddec pos = U.where(val == val.min())[0][0] idd[iii] = int(ids[pos]) return idd
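# Hedged sketch (invented coordinates): the same nearest-neighbour search
# used by find_alhambraids(), vectorized with numpy. Note the metric is
# |dRA| + |dDec| (an L1 distance with no cos(Dec) term), exactly as above.
def _demo_match_coords():
    import numpy as N  # local import so the sketch is self-contained.
    ra = N.array([10.00, 10.01, 10.02])
    dec = N.array([-1.00, -1.01, -1.02])
    ra1, dec1 = 10.011, -1.012
    val = N.abs(ra - ra1) + N.abs(dec - dec1)
    print 'closest index:', N.argmin(val)  # -> 1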
def master_PDZerrDistribution(hdf5list, bpzlist, m_max, o_min):
    """
    It stacks the photo-z error distributions (peak- and PDF-based)
    from a list of HDF5/BPZ file pairs.
    """
    plots = 1
    hdf5_files = U.get_str(hdf5list, 0)
    n_hdf5 = len(hdf5_files)
    bpz_files = U.get_str(bpzlist, 0)
    n_bpz = len(bpz_files)
    if n_bpz != n_hdf5:
        print 'Dimensions mismatch!'
        sys.exit()
    for ii in range(n_hdf5):
        print 'Reading file: ', hdf5_files[ii]
        if ii < 1:
            basez, basez2, dz_peak, dz_pdf, ng = get_PDZerrDistribution(
                hdf5_files[ii], bpz_files[ii], m_max, o_min)
        else:
            basez, basez2, dz_peak_temp, dz_pdf_temp, ng_temp = get_PDZerrDistribution(
                hdf5_files[ii], bpz_files[ii], m_max, o_min)
            dz_peak += dz_peak_temp
            dz_pdf += dz_pdf_temp
            ng += ng_temp
    if plots:
        plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(basez2, dz_peak, 'b-', lw=12, alpha=0.6)
        plt.plot(basez, dz_pdf, 'r-', lw=5, alpha=0.9)
        plt.grid()
        plt.xlim(-0.1, 0.1)
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
        plt.xlabel('$\delta_{z}$', size=30)
    # Saving data into a file.
    #output_filename = hdf5list[:-4]+'master.deltaz.mmax%.2fAB.mat'%(m_max)
    #U.put_data(output_filename,(basez,dz_peak,dz_pdf),'z dz_peak dz_PDFs')
    return basez, basez2, dz_peak, dz_pdf, ng
def get_JPLUS_gains(scimas):
    """
    It gets the GAIN values for a list of JPLUS images.
    """
    imas = U.get_str(scimas, 0)
    nimas = len(imas)
    vals = N.zeros(nimas)
    for ss in range(nimas):
        vals[ss] = JPLUS_image_gain(imas[ss])
    return vals
def alhambramosaico(listofimages, xx, yy, size, ID_number):
    """
    It creates an album-like mosaic with the (xx,yy) stamp
    from every image in the input list.
    ----
    import clash_tools
    from clash_tools import *
    lista = '/Volumes/CLASH/psfmodels/May2012/final/listilla.txt'
    size=25
    coox = 13
    cooy = 13
    ids=777
    alhambramosaico(lista,coox,cooy,size,ids)
    """
    # It creates the list of images to be used (from "listing.py").
    image_list = U.get_str(listofimages, 0)
    path = A.getpath(image_list[0])
    # Paths & outputs
    imageout = A.decapfile(
        listofimages) + '.mosaic.X%iY%iID%i.fits' % (xx, yy, ID_number)
    print 'Imageout: ', imageout
    # STARTING WITH THE PROCESS...
    # Creating "albumdata" where the whole information will be saved (MOSAIC!).
    # ===================================================================================
    nx = 6      # Number of objects along the x-axis.
    ny = 5      # Number of objects along the y-axis.
    mad = size  # Size of every squared sub-stamp. 100 is a good choice!
    mad2 = mad + 1
    albumdata = N.zeros((ny * mad2 + 1, nx * mad2 + 1), float)
    print ' --------------------------------------------------------------------------- '
    print ' A stamp size = %d x %d has been chosen ' % (mad, mad)
    print ' One galaxy will be displayed in a %d x %d album-like image' % (nx, ny)
    print ' --------------------------------------------------------------------------- '
    for i in range(len(image_list)):
        ix = i % nx
        iy = ny - (i / nx) - 1  # integer division (Python 2)
        image_in = image_list[i]
        # It picks the ith-submatrix from the ith image,
        # creating every single sub-mosaic.
        stamp = sta.stamping(image_in, xx, yy, mad)
        ax = ix * mad2 + 1
        ay = iy * mad2 + 1
        # Saving the ith-submosaic in albumdata.
        albumdata[ay:ay + mad, ax:ax + mad] = stamp.astype(float)
        print ' Copying submosaic %i from image %i: ' % (i, i)
    # Creating the new mosaic as a FITS file.
    fits.writeto(imageout, albumdata)
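# Hedged sketch: how the album grid maps a running stamp index i onto pixel
# offsets (ax, ay). Each stamp occupies mad2 = size + 1 pixels, which leaves
# a 1-pixel separator between stamps. Grid values below are illustrative.
def _demo_album_layout(size=25, nx=6, ny=5):
    mad2 = size + 1
    for i in range(4):
        ix = i % nx              # column, left to right
        iy = ny - (i / nx) - 1   # row, filled top-down (Python 2 int division)
        print 'stamp %i -> ax=%i, ay=%i' % (i, ix * mad2 + 1, iy * mad2 + 1)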
def get_NMAD_vs_seeing(bpzfile, mmax):
    """
    It computes the NMAD photo-z scatter, per BPZ catalogue,
    for galaxies brighter than mmax.
    """
    bpzs = U.get_str(bpzfile, 0)
    nb = len(bpzs)
    valor = N.zeros(nb)
    for ii in range(nb):
        zb, zs, mo = U.get_data(bpzs[ii], (1, 9, 10))
        dz = (zb - zs) / (1. + zs)
        good = N.less(mo, mmax)
        valor[ii] = U.std_mad(dz[good])
    return valor
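# Hedged sketch: U.std_mad is assumed to be the usual normalized MAD,
# sigma_NMAD = 1.4826 * median(|x - median(x)|), written out here with plain
# numpy on invented residuals to show its robustness to outliers.
def _demo_nmad():
    dz = N.array([0.01, -0.02, 0.015, 0.00, -0.01, 0.30])  # one outlier
    nmad = 1.4826 * N.median(N.abs(dz - N.median(dz)))
    print 'std: %.3f   NMAD: %.3f' % (N.std(dz), nmad)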
def script_slicing_hdf5alhambra(hdf5list, bpzlist): """ import alhambrahdf5 as AH hdf5list = '/Volumes/amb22/catalogos/reduction_v4f/hdf5.list' bpzlist = '/Volumes/amb22/catalogos/reduction_v4f/catalogs.list' AH.script_slicing_hdf5alhambra(hdf5list,bpzlist) """ hdf5s = U.get_str(hdf5list, 0) bpzs = U.get_str(bpzlist, 0) dim1 = len(hdf5s) dim2 = len(bpzs) if dim1 == dim2: for ss in range(dim1): AH.slicing_hdf5alhambra(hdf5s[ss], bpzs[ss]) # try: AH.slicing_hdf5alhambra(hdf5s[ss],bpzs[ss]) # except: print 'Impossible to run get2Dmatrix_HDF5 on iteration ',ss else: print 'Dimensions mismatch!'
def get_filters(columns): """ It extracts the FILTERS from a columns file. =========================================================== USAGE: columns = 'Abell383.columns' filtros = get_filters(columns) ---- """ data = U.get_str(columns,0) filters=[] for ii in range(len(data)): if data[ii][-4:] == '.res': filters.append(data[ii]) return filters
def master_global_PDF(hdf5list, m_max):
    """
    This routine serves to extract the final P(z) from a list of
    HDF5 files. A magnitude-cut (m < m_max) is applied.
    :param hdf5list: list of HDF5 files
    :return: zz, p_r, p_b, p_a
    """
    plots = 1
    hdf5files = U.get_str(hdf5list, 0)
    n_fields = len(hdf5files)
    for ii in range(n_fields):
        if ii < 1:
            zz, p_red, p_blue, p_global = global_PDZ(hdf5files[ii], m_max)
        else:
            zz, p_red_temp, p_blue_temp, p_global_temp = global_PDZ(
                hdf5files[ii], m_max)
            p_red += p_red_temp
            p_blue += p_blue_temp
            p_global += p_global_temp
    if plots:
        plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(zz, p_red, 'r-', lw=5, alpha=0.7)
        plt.plot(zz, p_blue, 'b-', lw=5, alpha=0.7)
        plt.plot(zz, p_global, 'k--', lw=5, alpha=0.7)
        plt.grid()
        plt.xlim(0., zz.max())
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['early', 'late', 'all'], loc='upper right', fontsize=20)
        plt.xlabel('$z$', size=30)
    output_filename = hdf5list[:-4] + 'master.PDF.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (zz, p_red, p_blue, p_global), 'z P_r P_b P_a')
    return zz, p_red, p_blue, p_global
def appendlistcatalog(lista, outfile='None'):
    """
    It appends a list of catalogs via appendcatalogs.
    """
    # Declaring some variables.
    cats = U.get_str(lista, 0)  # renamed from 'list' to avoid shadowing the builtin.
    temp = len(lista.split('/')[-1])
    root = lista[:-temp]
    print 'Number of catalogs to be appended: %i' % (len(cats))
    print 'Starting with the appendage...'
    for jj in range(len(cats) - 1):
        print 'Appending catalog %i/%i...' % (jj + 1, len(cats) - 1)
        ii = jj + 1
        if jj == 0:
            catalog1 = cats[jj]
            catalog2 = cats[ii]
            finalcatalog = root + 'temporal.cat'
            raimundo = finalcatalog
        else:
            catalog1 = raimundo
            catalog2 = cats[ii]
            finalcatalog = root + 'temporal2.cat'
        appendcatalogs(catalog1, catalog2, finalcatalog)
        if os.path.exists(root + 'temporal2.cat'):
            cmd = '/bin/rm %s' % (raimundo)
            os.system(cmd)
            cmd = '/bin/mv %s %s' % (root + 'temporal2.cat', root + 'temporal.cat')
            os.system(cmd)
            raimundo = root + 'temporal.cat'
    # Saving the final catalog.
    if outfile == 'None':
        final = lista[:-((len(lista.split('.')[-1])) + 1)] + '_appended.cat'
    else:
        final = outfile
    cmd = '/bin/mv %s %s' % (root + 'temporal.cat', final)
    os.system(cmd)
    print 'A new catalog created as ', final
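# Hedged sketch (pure Python, in-memory strings): the rolling-temporary
# pattern above amounts to folding an append over the list while keeping a
# single header, assuming that is what appendcatalogs() does on disk.
def _demo_fold_append():
    cats = ['# id mag\n1 20.0\n', '# id mag\n2 21.0\n', '# id mag\n3 19.5\n']
    header = cats[0].split('\n')[0]
    body = []
    for cc in cats:
        body += [row for row in cc.split('\n') if row and row[0] != '#']
    print '\n'.join([header] + body)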
def gettingJPLUS_123limmags(clustername):
    """
    It creates a new file with an estimation of the 5-sigma
    mag-limits within 1",2",3" apertures.
    """
    sigmas = 5
    root2images = finalroot + 'images/%s/' % (clustername)
    listimages = root2images + 'sci.list'
    imas = U.get_str(listimages, 0)
    nims = len(imas)
    finalcat = root2images + '%s.photo.mlim123.cat' % (clustername)
    if not os.path.exists(finalcat):
        outfile = open(finalcat, 'w')  # fixed: was 'outfile.open(...)'.
        outfile.write('# 5-sigma threshold \n')
        outfile.write('# Filter mlim_1arcs mlim_2arcs mlim_3arcs \n')
        # 'root2cats' and 'filtros' are assumed to be defined at module level.
        for ii in range(nims):
            catalog = root2cats + get_nickname(imas[ii]) + '.cat'
            f1, f2, f3, ef1, ef2, ef3, m1 = U.get_data(catalog, (23, 24, 25, 26, 27, 28, 21))
            s2n_1 = N.zeros(len(f1))
            s2n_2 = N.zeros(len(f2))
            s2n_3 = N.zeros(len(f3))
            for ggg in range(len(f1)):
                if f1[ggg] > 0. and ef1[ggg] > 0.:
                    s2n_1[ggg] = f1[ggg] / (ef1[ggg] * 1.)
                else:
                    s2n_1[ggg] = -1.00
                if f2[ggg] > 0. and ef2[ggg] > 0.:
                    s2n_2[ggg] = f2[ggg] / (ef2[ggg] * 1.)
                else:
                    s2n_2[ggg] = -1.00
                if f3[ggg] > 0. and ef3[ggg] > 0.:
                    s2n_3[ggg] = f3[ggg] / (ef3[ggg] * 1.)
                else:
                    s2n_3[ggg] = -1.00
            # Here we clean the sample and select the 5-sigma detections.
            good_s2n_1 = N.greater(s2n_1, sigmas - 0.2) * N.less(s2n_1, sigmas + 0.2)
            good_s2n_2 = N.greater(s2n_2, sigmas - 0.2) * N.less(s2n_2, sigmas + 0.2)
            good_s2n_3 = N.greater(s2n_3, sigmas - 0.2) * N.less(s2n_3, sigmas + 0.2)
            mlim1 = N.mean(m1[good_s2n_1])
            mlim2 = N.mean(m1[good_s2n_2])
            mlim3 = N.mean(m1[good_s2n_3])
            # fixed: '%s' for the filter name (was '%.2f') and no leading '#',
            # which would have commented out every data row.
            linea = '%s %.2f %.2f %.2f \n' % (filtros[ii], mlim1, mlim2, mlim3)
            outfile.write(linea)
        outfile.close()
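# Hedged sketch (synthetic photometry, vectorized numpy): the 5-sigma limit
# above is the mean magnitude of sources whose S/N falls within [4.8, 5.2];
# the same selection without the per-object loop. Numbers are invented.
def _demo_mlim(sigmas=5.0):
    ef = N.random.uniform(0.5, 10., 5000)        # fake flux errors
    f = ef * N.random.uniform(1., 10., 5000)     # fake fluxes, S/N in [1,10]
    m = 25.0 - 2.5 * N.log10(f)                  # fake ZP = 25
    s2n = N.where((f > 0.) * (ef > 0.), f / ef, -1.)
    sel = N.greater(s2n, sigmas - 0.2) * N.less(s2n, sigmas + 0.2)
    print '5-sigma limit ~ %.2f AB' % (N.mean(m[sel]))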
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import os, sys
import numpy as N
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import splus_calib_tools as sct
#import matplotlib.pyplot as plt

#### Script to read and normalize the catalogue to the mean PSF.

root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/'
final_path = root + 'data_quality/'
lista_cats = root + 'splus_cats_NGSL/photometry.list'
cats = U.get_str(lista_cats, 0)
n_cats = len(cats)
seeing = N.zeros(n_cats)
x_global = []
y_global = []
s2n_global = []
Flag_global = []
fw_global = []
mu_global = []
mg_global = []
mr_global = []
mi_global = []
mz_global = []
for ii in range(n_cats):
    catalog = cats[ii]
def figura33(lista):
    """
    It plots the number density as a function of M_B and redshift,
    using the stellar classification from version_e.
    ----
    import alhambra_completeness as alhc
    lista = '/Volumes/amb22/catalogos/reduction_v4d/globalcats/lista.list'
    alhc.figura33(lista)
    """
    blue = 0
    red = 1
    cats = U.get_str(lista, 0)
    cats2 = U.get_str(lista, 1)
    nc = len(cats)
    dx = 0.2
    dy = 0.4
    nxbins = 4
    nybins = 2
    ods = 0.05
    mmin = 16.0
    mmax = 23.75
    zbmin = 0.0001
    zbmax = 1.4
    Mmin = -24
    Mmax = -17
    if red:
        Tbmin = 1  # 7.
        Tbmax = 5  # 11.
        resolmag = 0.2
        resolz = 0.05
    if blue:
        Tbmin = 7.
        Tbmax = 11.
        resolmag = 0.2
        resolz = 0.05
    resol = 0.025
    areas = ([0.45, 0.47, 0.23, 0.24, 0.47, 0.47, 0.46, 2.79])
    plt.figure(111, figsize=(21.5, 11.5), dpi=70, facecolor='w', edgecolor='k')
    ss = 0
    for jj in range(nybins):
        for ii in range(nxbins):
            # Reading data from catalogs.
            mo, zb, tb, odds, m814 = U.get_data(cats[ss], (81, 72, 75, 76, 62))
            sf = U.get_data(cats2[ss], 71)
            g = U.greater_equal(abs(m814), mmin) * U.less_equal(abs(m814), mmax)
            # g *= U.greater_equal(odds, ods)
            g *= U.greater_equal(tb, Tbmin) * U.less_equal(tb, Tbmax)
            g *= U.less_equal(sf, 0.8)
            yy = -0.014 * m814 + 0.38
            g *= U.greater(odds, yy)
            g *= U.less_equal(mo, Mmax + resol) * U.greater(mo, Mmin - resol)
            g *= U.greater(zb, zbmin) * U.less_equal(zb, zbmax)
            mo, zb, tb, odds = U.multicompress(g, (mo, zb, tb, odds))
            print 'dimension', len(mo)
            # Plotting density.
            if ii == nxbins - 1:
                cuadrado = plt.axes([.1 + (ii * dx),
                                     .1 + ((nybins - jj - 1) * dy),
                                     dx + (dx * 0.2), dy])
            else:
                cuadrado = plt.axes([.1 + (ii * dx),
                                     .1 + ((nybins - jj - 1) * dy), dx, dy])
            matrix, axis2, axis1 = rs.CC_numberdensity_contour_zvolume(
                zb, mo, resolz, resolmag, 1)
            if blue:
                plt.contourf(axis2, axis1, U.log10(matrix / areas[ss]),
                             250, vmin=-11., vmax=-7.)  # blue galaxies
            if red:
                plt.contourf(axis2, axis1, U.log10(matrix / areas[ss]),
                             250, vmin=-12., vmax=-7.65)  # red galaxies
            if ii == nxbins - 1:
                aa = plt.colorbar(pad=0., format='%.1f')
                aa.set_label('Log. Density [N/Mpc$^{3}$/deg$^{2}$]', size=18)
            if jj != nybins - 1:
                plt.setp(cuadrado, xticks=[])
            if ii != 0:
                plt.setp(cuadrado, yticks=[])
            if jj == nybins - 1:
                plt.xlabel('M$_{B}$', size=27)
                plt.xticks(fontsize=17)
            if ii == 0:
                plt.ylabel('redshift', size=28)
                plt.yticks(fontsize=17)
            # Plotting the axes manually.
            base1 = U.arange(Mmin, Mmax + 1., 1.)
            base2 = U.arange(0, zbmax + (2. * resol), resol)
            dim1 = len(base1)
            dim2 = len(base2)
            for rr in range(dim1):
                plt.plot(base2 * 0. + base1[rr], base2, 'k--',
                         linewidth=1., alpha=0.25)
            for rr in range(dim2):
                plt.plot(base1, base1 * 0. + base2[rr], 'k--',
                         linewidth=1., alpha=0.25)
            plt.ylim(zbmin + 0.0001, zbmax - 0.001)
            plt.xlim(Mmin + 0.0001, Mmax - 0.0001)
            if ss == 7:
                labelleg = 'Global'
            else:
                labelleg = 'A%i' % (ss + 2)
            xypos = (Mmax - 1.6, zbmax - 0.18)
            if ss == 7:
                xypos = (Mmax - 3.5, zbmax - 0.18)
            plt.annotate(labelleg, xy=xypos, fontsize=40, color='black')
            ss += 1
    plt.savefig('completeness.alhambra.png', dpi=200)
import os, sys sys.path.append('/Users/albertomolino/doctorado/photo/programas/') sys.path.append( '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/codes/') import useful as U import numpy as N import splus_s82_hdf5_tools as to import matplotlib.pyplot as plt # Roots and paths sigma = '0150' root = '/Volumes/CLASH/S82/specz/' # Reading photometric catalogues. bpz_cat_list = root + 'bpz_short.list' bpz_cats = U.get_str(bpz_cat_list, 0) n_cats = len(bpz_cats) check_mags = 1 check_types = 1 check_odds = 1 # Definition of variables. CIS = [] MOS = [] ZSS = [] ODS = [] TBB = [] for ii in range(n_cats): hdf5_file = os.path.dirname(bpz_cats[ii]) + '/HDF5/' hdf5_file += os.path.basename(bpz_cats[ii])[:-8] + 'hdf5'
def appending_ids2catalogues(field, pointing, ccd): """ import alhambra_3arcs as A3 A3.appending_ids2catalogues(2,1,1) """ catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % ( field, field, pointing, ccd) idalh = U.get_str(catalhambra, 0) idalh2 = U.arange(len(idalh)) + 1 xalh, yalh = U.get_data(catalhambra, (6, 7)) cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % ( field, field, pointing, ccd) id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4)) print len(id3arcs) matchfile = cat3arcs[:-3] + 'idsfrommatch.txt' if not os.path.exists(matchfile): idcol = idalh2 xcol = xalh ycol = yalh idsp = id3arcs xsp = x3arcs ysp = y3arcs pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5) # Compressing matches for ColorPro... print 'Compressing matches...' matchidcol = pepe[:, 0].astype(int) gdet_col = U.greater(matchidcol, 0) # Excluding 0's (non matched detections) matchidcol = U.compress(gdet_col, (matchidcol)) # Compressing matches for Spectroscopic... matchidsp = pepe[:, 1].astype(int) gdet_spz = U.greater(matchidsp, 0) # Excluding 0's (non matched detections) matchidsp = U.compress(gdet_spz, (matchidsp)) print 'len(idcol)', len(idcol) print 'len(idsp)', len(idsp) if len(matchidcol) == len(matchidsp): print 'Creating idredu & zsredu ' print 'Dimension of matchidsp ', len(matchidsp) idredu = U.zeros(len(matchidsp)) idspredu = U.zeros(len(matchidsp)) for ii in range(len(matchidsp)): colindex = A.id2pos(idcol, matchidcol[ii]) # Position for Index idcol spzindex = A.id2pos(idsp, matchidsp[ii]) # Position for Index idsp idredu[ii] = idcol[colindex] # ID for ColorPro idspredu[ii] = idsp[spzindex] # Specz for Specz matchfile = cat3arcs[:-3] + 'idsfrommatch.txt' U.put_data(matchfile, (idredu, idspredu)) if os.path.exists(matchfile): pepa = open(matchfile[:-3] + 'bis.cat', 'w') idredu, idspredu = U.get_data(matchfile, (0, 1)) i11 = idredu.astype(int) - 1 i22 = idspredu.astype(int) lista = [] for ii in range(len(i11)): lista.append(idalh[i11[ii]]) pepa.write('%s %s \n' % (idalh[i11[ii]], i22[ii])) pepa.close() finalfinal = cat3arcs[:-3] + 'final.cat' if os.path.exists(finalfinal): A.deletefile(finalfinal) if not os.path.exists(finalfinal): print 'Preparing ', finalfinal idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0) append_IDs2_3arcs_catalogues(cat3arcs, idsa)
def correct_SExt_uncertainties(cluster):
    # (catalog,columns,zpts,gains,area2rms,weightimas,arinarout,finalcat):
    """
    It reads the input catalogue and corrects* its photometric errors
    empirically, using direct area-vs-sigma estimations. It returns the
    name of the new and corrected catalogue.
    --
    *** THE POSITION OF AREA, MAGS & ERRMAGS NEEDS TO BE CHECKED OUT
    BEFORE RUNNING IT.
    ----------------------------------------------------------------------------
    REQUIREMENTS:
    - An input catalogue (catalog)
    - Its corresponding COLUMNS file (columns)
    - A list with all ZPS (2nd column) (zpts)
    - A list with all GAINS (2nd column) (gains)
    - A list with all area2sigma files (1 per band) (area2rms)
    - A list with all WEIGHT-MAPS (weightimas)
    """
    # If weight = 1, it uses the Weight-maps to calculate photo-uncertainties.
    weight = 0
    # If verbose = 1, additional information is displayed during the analysis.
    verbose = 0
    # These factors serve to plot several check figures.
    check1 = 1
    check2 = 1
    catalog = finalroot + '/%s/images/%s.photo.cat' % (cluster, cluster)
    columns = finalroot + '/%s/images/%s.photo.columns' % (cluster, cluster)
    scimas = finalroot + '/%s/images/sci.list' % (cluster)
    weightimas = finalroot + '/%s/images/wht.list' % (cluster)
    # Keep the file name; its 2nd column (ZPs) is read below.
    zpt_file = finalroot + '/%s/images/%s.zpt.cat' % (cluster, cluster)
    gains = get_JPLUS_gains(scimas)
    # Name for the final corrected catalogue.
    newphotcat = finalroot + '/%s/images/%s.photo.err.cat' % (cluster, cluster)
    # To account for differences in exposure time, we need a list with ALL WEIGHT-maps.
    if weight:
        wimas = U.get_str(weightimas, 0)
    if verbose:
        print 'Catalog: ', catalog
    if os.path.exists(catalog):
        mm = get_magnitudes(catalog, columns)
        em = get_errmagnitudes(catalog, columns)
        xx, yy, aper = U.get_data(catalog, (3, 4, 5))
        data = C.loaddata(catalog)    # Loading the whole catalog content.
        head = C.loadheader(catalog)  # Loading the original header.
        zps = U.get_data(zpt_file, 1) # Loading zeropoint values (2nd column).
        filters = get_filters(columns)  # It gets the filter names for plots.
        # Defining new variables.
        ng = len(mm[:, 0])  # ng is the number of galaxies.
        nf = len(mm[0, :])  # nf is the number of filters.
        if verbose:
            print 'ng,nf', ng, nf  # fixed: 'nl' was undefined.
        errmag = N.zeros((ng, nf), float)  # Where the new photo-errors will be saved.
        # Starting the game..
        for jj in range(nf):
            if verbose:
                print 'Analyzing filter %i' % (jj + 1)
            # For every single band we need to read the apert_v_sigma file
            # (area2rms) to interpolate the real values of area from the catalog.
            # 'area2rms' is assumed to be defined at module level (it appears
            # in the commented-out signature above).
            area2rms_bands = U.get_str(area2rms, 0)
            sqrtarea, sbackg, smean = U.get_data(area2rms_bands[jj], (0, 1, 2))
            rmsfit = N.poly1d(N.polyfit(sqrtarea, sbackg, 3))  # CHECK
            meanfit = N.poly1d(N.polyfit(sqrtarea, smean, 2))  # CHECK
            # It reads the WEIGHT-map, if requested.
            if weight:
                if verbose:
                    print 'Reading Weight image: ', wimas[jj]
                wdata = fits.open(wimas[jj])[0].data
                wdatanorm = wdata / wdata.max()
            if check1:
                # Sanity plot to assure the interpolated area2sigma function was right.
                plt.figure(0, figsize=(7, 6), dpi=70, facecolor='w', edgecolor='k')
                plt.clf()
                plt.plot(sqrtarea, sbackg, 'ko',
                         sqrtarea, rmsfit(sqrtarea), 'r-', linewidth=2)
                plt.xlabel('$\sqrt{N}$', size=18)
                plt.ylabel('$\sigma$', size=20)
                plt.legend(['Data', 'Interpolation'], numpoints=1, loc='upper left')
                plt.grid()
                plt.savefig(catalog[:-3] + '.%s.Ar2Si.check.png' % (filters[jj]), dpi=80)
                # Sanity plot to assure the interpolated area2mean function was right.
                plt.clf()
                plt.plot(sqrtarea, smean, 'ko',
                         sqrtarea, meanfit(sqrtarea), 'r-', linewidth=2)
                plt.xlabel('$\sqrt{N}$', size=18)
                plt.ylabel('$mean$', size=20)
                plt.legend(['Data', 'Interpolation'], numpoints=1, loc='upper left')
                plt.grid()
                plt.savefig(catalog[:-3] + '.%s.Ar2Mean.check.png' % (filters[jj]), dpi=80)
            fluxgal = B.mag2flux(mm[:, jj] - zps[jj])
            # Values to estimate the mag errors.
            sqar = N.sqrt(aper)
            sigback = rmsfit(sqar)
            meansignal = meanfit(sqar)
            # There will be non-detected galaxies with m=99 magnitudes.
            # Those numbers should not change here.
            detected = U.less_equal(abs(mm[:, jj]), 30)  # good sample
            nondetected = U.greater(abs(mm[:, jj]), 30)  # bad sample
            # Photometric error (as defined by SExtractor) but using the new
            # sigma value. Note the Poissonian term is written as 1/(F*gain),
            # which is SExtractor's definition; the original had F*gain and
            # an undefined scalar 'gain' instead of gains[jj].
            if weight:
                pixw = N.zeros(ng)
                for hhh in range(ng):
                    pixw[hhh] = wdatanorm[int(yy[hhh]) - 1, int(xx[hhh]) - 1]
                fluxcor = fluxgal * pixw
                newerror = N.sqrt((aper * sigback * sigback / fluxcor ** 2)
                                  + (1. / (fluxcor * gains[jj])))
                newerror *= 1.0857
            else:
                newerror = N.sqrt((aper * sigback * sigback / fluxgal ** 2)
                                  + (1. / (fluxgal * gains[jj])))
                newerror *= 1.0857
            # Assessing new uncertainties.
            errmag[detected, jj] = newerror[detected]
            errmag[nondetected, jj] = em[nondetected, jj]
            # A new figure is created to compare SExtractor vs empirical uncertainties.
            if check2:
                line = N.arange(16., 30., 0.25)
                SExline = U.bin_stats(mm[:, jj], em[:, jj], line, stat='mean_robust')
                aperline = U.bin_stats(mm[:, jj], errmag[:, jj], line, stat='mean_robust')
                plt.figure(1, figsize=(8, 7), dpi=70, facecolor='w', edgecolor='k')
                plt.clf()
                plt.plot(mm[:, jj], em[:, jj], 'r+', mm[:, jj], errmag[:, jj], 'k+')
                plt.plot(line, SExline, '-ro', line, aperline, '-ko', linewidth=6, alpha=0.2)
                plt.legend(['$SExtractor$', '$Apertures$'], numpoints=1, loc='upper left')
                plt.xlabel('$Mags$', size=17)
                plt.ylabel('$ErrMags$', size=17)
                plt.xlim(17., 30.)
                plt.ylim(0., 1.0)
                plt.grid()
                plt.savefig(catalog[:-3] + '.%s.uncert.comparison.png' % (filters[jj]), dpi=80)
        # The new values of the mag errors now overwrite the original data.
        vars, evars, posref, zpe, zpo = get_usefulcolumns(columns)
        data[:, evars] = errmag[:, N.arange(nf)]
        # Saving and creating the new catalogue ('finalcat' was undefined).
        C.savedata(data, newphotcat, dir="", header=head)
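# Hedged sketch: the SExtractor-style magnitude error the loop above
# re-evaluates with the empirical sigma,
#   dm = 1.0857 * sqrt(A*sig**2 + F/g) / F,
# expanded into the two terms used in 'newerror'. Numbers are invented.
def _demo_sex_error(area=50., sigma=3., flux=500., gain=1.5):
    dm = 1.0857 * N.sqrt(area * sigma ** 2 + flux / gain) / flux
    dm2 = 1.0857 * N.sqrt(area * sigma ** 2 / flux ** 2 + 1. / (flux * gain))
    print 'dm = %.4f (= %.4f)' % (dm, dm2)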
__author__ = 'albertomolino'
import os, sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import numpy as N
import matplotlib.pyplot as plt
from matplotlib import cm

# Paths
root_to_bpz = '/Users/albertomolino/codigos/bpz-1.99.2/'
root_to_seds = root_to_bpz + 'SED/'
sed = U.get_str(root_to_seds + 'COSMOSeB11new_recal.list', 0)
n_seds = len(sed) - 2


def lookcloser(vector, value):
    """
    It returns the position of the first element in the (sorted) vector
    greater than or equal to value, clamped to the last index.
    """
    dim = len(vector)
    try:
        if vector[0] >= value:
            pos = 0
        elif vector[1] >= value:
            pos = 1
        elif vector[-1] < value:
            pos = dim - 1
        else:
            # The loop now starts at 2 so vector[ii - 2] cannot wrap around
            # to the end of the array (it did for ii = 0, 1).
            for ii in range(2, dim):
                if vector[ii - 2] < value < vector[ii]:
                    pos = ii - 1
    except:
        print 'ID not found!'
        pos = -1
    return pos
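# Hedged sketch: for a sorted vector, lookcloser() matches numpy's left-hand
# searchsorted (clamped to the last index for values above the top of the
# vector); both give the same bracket here.
def _demo_lookcloser():
    vec = N.array([0.0, 0.5, 1.0, 1.5, 2.0])
    value = 1.2
    print 'numpy     :', min(N.searchsorted(vec, value), len(vec) - 1)  # -> 3
    print 'lookcloser:', lookcloser(vec, value)                         # -> 3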
def get_alhambra_GOLD(field,pointing,ccd): """ import alhambragold as alhgold alhgold.get_alhambra_GOLD(2,1,1) """ root_catalogs = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/'%(field) root_gold = '/Volumes/amb22/catalogos/reduction_v4f/GOLD/' catalog = root_catalogs+'alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' %(field,pointing,ccd) if os.path.exists(catalog): data1 = coeio.loaddata(catalog) # Loading the whole catalog content. head1 = coeio.loadheader(catalog) # Loading the original header. nc1 = len(data1.T) dim1 = len(data1[:,0]) nh = len(head1) # Final catalog. catout = root_gold+'alhambra.gold.F0%iP0%iC0%i.ColorProBPZ.cat' %(field,pointing,ccd) outfile = open(catout,'w') # Reducing the length of the catalogs according to input ids ids = U.get_str(catalog,0) mo = U.get_data(catalog,65) cond1 = U.less(mo,23.000) data2 = data1[cond1,:] nraws = U.shape(data2)[0] ncols = U.shape(data2)[1] # Setting the IDs to its final values (including F814W+field+pointing+ccd) finalids = alh.getalhambrafinalids(field,pointing,ccd,'ISO') finalids2 = U.compress(cond1,finalids) # Restoring header... for ii in range(nh): outfile.write('%s \n'%(head1[ii])) formato = '%s %i %i %i %.4f %.4f %.3f %.3f %i %.2f %.2f %.4f %.3f %.3f %.1f %.2f %.3f %.2f %i ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f ' formato += '%i %i %.3f %i %.2f %i ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ' formato += '%.3f %.3f %.3f %.3f %i %i ' form = formato.split() # Here it defines the format to be used. for jj in range(nraws): for ss in range(ncols): goodform = '' goodform = form[ss]+' ' if ss == 0: outfile.write(goodform%(int(finalids2[jj]))) else: outfile.write(goodform%(data2[jj,ss])) outfile.write(' \n') outfile.close()
root2 = '/Volumes/amb4/ALHAMBRA/images/individuals/singlexposures/' if laica: root2 += 'LAICA/' if omega: root2 += 'OMEGA/' imasnames = root2 + 'f0%ip0%i_*_%i.swp.fits.indiv.upd.list' % (ff, po, ccd) new_ccd_list = root2 + 'f0%ip0%ic0%i.list' % (ff, po, ccd) if not os.path.exists(new_ccd_list): print 'Creating the ind.frame-list for F0%iP0%iC0%i ' % (ff, po, ccd) cmd = '/bin/ls %s > %s ' % (imasnames, new_ccd_list) print cmd os.system(cmd) print ' ' print '############################################################' # Reading the final lists of individual frames. if laica: listaimas = U.get_str(finalLAICA + 'f0%ip0%ic0%i.list' % (ff, po, ccd), 0) print 'Reading LAICA list: ', finalLAICA + 'f0%ip0%ic0%i.list' % (ff, po, ccd) if omega: listaimas = U.get_str(finalLAICA + 'omega.upd.list', 0) print 'Reading OMEGA list: ', finalLAICA + 'omega.upd.list' nbands = len(listaimas) #20 filters. for ii in range(nbands): templist = U.get_str(listaimas[ii], 0) # Reading ind.frames per filter. nick = listaimas[ii].split('/')[-1] print 'Reading images from %s ...' % (listaimas[ii]) dim_indframes = len(templist) # Number of ind.frames. print 'List %s contains %i individual frames.' % (nick, dim_indframes) # Reading the corresponding filter
__author__ = 'albertomolino' import os, sys sys.path.append('/Users/albertomolino/doctorado/photo/programas/') import useful as U import numpy as N import matplotlib.pyplot as plt root_to_bpz = '/Users/albertomolino/codigos/bpz-1.99.2/' root_to_filts = root_to_bpz + 'FILTER/' filters = U.get_str(root_to_filts + 'splusNBs.list', 0) narrow = [] narrow.append('F378') narrow.append('F395') narrow.append('F410') narrow.append('F430') narrow.append('F515') narrow.append('F660') narrow.append('F861') #colors for different filters colores = N.zeros((3, 7), float) colores[:, 0] = (0.00, 0.25, 1.00) colores[:, 1] = (0.00, 0.65, 1.00) colores[:, 2] = (0.00, 0.50, 0.00) colores[:, 3] = (0.85, 0.65, 0.00) #colores[:,5]=(0.75,0.50,0.00) colores[:, 4] = (0.80, 0.25, 0.00) #colores[:,7]=(1.00,0.00,0.00) colores[:, 5] = (0.85, 0.00, 0.00)
between both sets (per filter) as the normalized difference of the integrated signal within a filter. """ import sys sys.path.append('/Users/albertomolino/doctorado/photo/programas/') import useful as U import pandas as pd import bpz_tools as B import numpy as N import matplotlib.pyplot as plt root = '/Users/albertomolino/codigos/bpz-1.99.2/FILTER/' names = pd.read_table(root + 'SPLUS_September2018/SPLUS_201809.list') filtros_old = U.get_str(root + 'SPLUS_July2017/SPLUS.list', 0) filtros_new = U.get_str(root + 'SPLUS_September2018/SPLUS_201809.list', 0) base = N.arange(3200, 10000, 10) values = N.zeros(12) eff_wav = N.zeros(12) for ii in range(12): eff_wav[ii] = B.effective_wavelength(filtros_new[ii]) for ii in range(12): x_o, y_o = U.get_data(root + filtros_old[ii], (0, 1)) x_n, y_n = U.get_data(root + filtros_new[ii], (0, 1)) y_o_r = U.match_resol(x_o, y_o, base) y_n_r = U.match_resol(x_n, y_n, base) values[ii] = N.sum(y_o_r - y_n_r) / N.sum(y_o_r)
# Redshift range. z_min = 0.0 z_max = 0.2 delta_z = 0.01 z_range = N.arange(z_min,z_max+delta_z,delta_z) res_z = len(z_range) # Filters filter_x1 = 'g_SDSS' filter_x2 = 'z_SDSS' filter_y1 = 'u_SDSS' filter_y2 = 'g_SDSS' #Models sed_models = U.get_str(sed_path+sed_lib,0) n_models = len(sed_models) # SDSS/S82 Contours sdss_s82_path = '/Users/albertomolino/Postdoc/T80S_Pipeline/targets/SDSS_S82/' sdss_s82_cat = sdss_s82_path+'catalogues/stripe82_spz_extcorr.cat' u_mag,g_mag,r_mag,i_mag,z_mag,z_spec = U.get_data(sdss_s82_cat,(4,6,8,10,12,3)) good_sample = N.less(abs(u_mag-g_mag),5.) good_sample*= N.less(abs(g_mag-z_mag),5.) good_sample*= N.greater_equal(z_spec,z_min) good_sample*= N.less_equal(z_spec,z_max) u_mag,g_mag,r_mag = U.multicompress(good_sample,(u_mag,g_mag,r_mag)) i_mag,z_mag,z_spec = U.multicompress(good_sample,(i_mag,z_mag,z_spec)) #Reduce sample size!
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import splus_s82_hdf5_tools as to

# General roots.
root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
root += 'S82/Dec2017/'

# Reading HDF5 files.
root_to_hdf5 = root + 'splus_cats_NGSL/*/'  # TBC!!
final_root_hdf5 = root_to_hdf5 + 'PDFs/'
if not os.path.exists(final_root_hdf5):
    cmd = '/bin/mkdir %s ' % (final_root_hdf5)
    os.system(cmd)

hdf5list = root_to_hdf5 + 'hdf5.list'
hdf5_files = U.get_str(hdf5list, 0)
n_hdf5 = len(hdf5_files)

# Reading photometric catalogues.
root_to_cats = root + 'released_catalogues/'
cat_list = root_to_cats + 'master_SPLUS_STRIPE82_photo_BPZ.list'
global_cats = U.get_str(cat_list, 0)
n_cats = len(global_cats)

# Checking dimensionality!
if n_cats != n_hdf5:
    print 'Dimension mismatch!'
    sys.exit()

# Magnitude bins.
mag_bins = [14, 16, 18, 20]