Code Example #1
def step3(udropscat, galfitcat, outputcat):
    # Now merge the GALFIT results with original U-dropout catalog
    # Both catalogs should be FITS tables
    # Output catalog will be overwritten if it already exists
    if os.path.exists(outputcat):
        os.remove(outputcat)
    c1 = Ftable(
        udropscat
    )  # the U-dropout catalog with the same columns as the GOODS-S MW catalog
    c2 = Ftable(galfitcat)  # the merged GALFIT catalog
    colnames = c1.Columns + c2.Columns[1:]  # bypass c2.objectid (redundant)
    formats = c1.d.formats + c2.d.formats[1:]
    # Find matching indices
    #i1 = N.arange(len(c1.d),'int')  # indices in c1
    i2 = N.ones(len(c1.d), 'int') * (
        -1)  # to store indices in c2 corresponding to i1
    # e.g., c2.objectid[i2[0]] = c1.id_1[0]
    for i1 in range(len(c1.d)):
        j = N.arange(len(c2.d))[c2.objectid == c1.id_1[i1]]
        if len(j) > 0:
            i2[i1] = j[0]
    # Now initialize the arrays
    arrays = {}
    # First collect all columns from c1
    for col in c1.Columns:
        arrays[col] = getattr(c1, col)
    for i in range(len(c2.Columns)):
        if c2.Columns[i] == 'objectid':
            pass
        elif c2.d.formats[i] == 'D':
            arrays[c2.Columns[i]] = N.ones(len(c1.d), 'float') * (-99.0)
        else:
            arrays[c2.Columns[i]] = N.ones(len(c1.d), 'int') * (-1)
    print arrays.keys()
    # Now copy the arrays in c2
    for i1 in range(len(c1.d)):
        if i2[i1] >= 0:
            for col in c2.Columns[1:]:
                arrays[col][i1] = getattr(c2, col)[i2[i1]]
    # Now convert arrays into a list
    arrays2 = []
    allcolumns = []
    for col in colnames:
        arrays2 += [arrays[col]]
    # Now construct pyfits.ColDefs
    allcolumns = []
    for i in range(len(colnames)):
        allcolumns += [
            pyfits.Column(name=colnames[i],
                          format=formats[i],
                          array=arrays2[i])
        ]
    allcols = pyfits.ColDefs(allcolumns)
    tbhdu = pyfits.new_table(allcols)
    tbhdu.writeto(outputcat)
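A minimal, self-contained sketch of the ID-matching bookkeeping used in step3 above (the i2 index array), written with plain NumPy and hypothetical ID arrays rather than the actual catalogs:

import numpy as np

id_1 = np.array([101, 102, 103, 104])   # IDs in the U-dropout catalog (c1)
objectid = np.array([103, 101, 999])    # IDs in the GALFIT catalog (c2)

# For each c1 row, store the matching row index in c2, or -1 if unmatched
i2 = np.full(len(id_1), -1, dtype=int)
for i1, val in enumerate(id_1):
    j = np.flatnonzero(objectid == val)
    if len(j) > 0:
        i2[i1] = j[0]

print(i2)  # -> [ 1 -1  0 -1]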
Code Example #2
def plot_RLdist_udrops(cat=catdir+'/udrops/udrops_goodss_ubvy_130517_vflags_galfit.fits',
                       markers=['^','o'], ax=None, ms=8**2,
                       colors=[pms.Bright_Blue,pms.Bright_Red]):
   c = Ftable(cat)
   mag_array1 = c.f606w_magout_gf[(c.udf_fit==True)&(c.f606w_magout_gf<=27.5)]
   re_array1 = c.f606w_reout_gf[(c.udf_fit==True)&(c.f606w_magout_gf<=27.5)]
   n_array1 = c.f606w_nout_gf[(c.udf_fit==True)&(c.f606w_magout_gf<=27.5)]

   mag_array2 = c.f606w_magout_gf[(c.deep_fit==True)&(c.f606w_magout_gf<=26.5)]
   re_array2 = c.f606w_reout_gf[(c.deep_fit==True)&(c.f606w_magout_gf<=26.5)]
   n_array2 = c.f606w_nout_gf[(c.deep_fit==True)&(c.f606w_magout_gf<=26.5)]

   mag_array3 = c.f606w_magout_gf[(c.ers_fit==True)&(c.f606w_magout_gf<=26.5)]
   re_array3 = c.f606w_reout_gf[(c.ers_fit==True)&(c.f606w_magout_gf<=26.5)]
   n_array3 = c.f606w_nout_gf[(c.ers_fit==True)&(c.f606w_magout_gf<=26.5)]

   mag_array4 = c.f606w_magout_gf[(c.wide_fit==True)&(c.f606w_magout_gf<=26.5)]
   re_array4 = c.f606w_reout_gf[(c.wide_fit==True)&(c.f606w_magout_gf<=26.5)]
   n_array4 = c.f606w_nout_gf[(c.wide_fit==True)&(c.f606w_magout_gf<=26.5)]

   mag_array = np.concatenate([mag_array1,mag_array2,mag_array3,mag_array4])
   re_array = np.concatenate([re_array1,re_array2,re_array3,re_array4])
   n_array = np.concatenate([n_array1,n_array2,n_array3,n_array4])

   plot_RLdist(mag_array, re_array, n_array, 'U-dropouts', colors=colors,
               markers=markers, ax=ax, ms=ms, gfband='F606W')
Code Example #3
 def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format='fits'):
    if format.lower() == 'fits':
       self.c = Ftable(catalog)
       self.Nc = len(self.c.d)
    else:
       self.c = sextractor(catalog)
       self.Nc = len(self.c)
    self.objid = getattr(self.c, objid)
    self.ra = getattr(self.c, ra)
    self.dec = getattr(self.c, dec)
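A tiny sketch of the getattr-based column lookup this method relies on, using a hypothetical stand-in object instead of a real Ftable or sextractor catalog:

import numpy as np

class FakeCatalog(object):
    objid = np.array([1, 2, 3])
    ra = np.array([53.10, 53.11, 53.12])
    dec = np.array([-27.80, -27.81, -27.82])

c = FakeCatalog()
ra_col = 'ra'              # column name supplied as a string
print(getattr(c, ra_col))  # equivalent to c.ra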
Code Example #4
File: plot_RLdist.py  Project: albertfxwang/mypytools
def plot_RLdist_vdrops(logR0,
                       beta,
                       cat1=catdir + '/vdrops/vdrops_gf_v3.fits',
                       cat2=catdir + '/vdrops/vdrops_udf_gf_v3.fits',
                       marker='o',
                       ax=None,
                       ms=8**2,
                       color=pms.Bright_Blue,
                       bootstrap=False,
                       nsamp=10000):
    c1 = Ftable(cat1)
    c2 = Ftable(cat2)
    mcz = bl.mconvert('kcorr/M1500_to_f850lp_omega_m_0.3.txt')
    mean_kcorr = mcz(5.0)
    gfcrit1 = (c1.f850lp_gfflag == True) & (c1.magout <= 26.5)
    gfcrit2 = (c2.f850lp_gfflag == True) & (c2.magout <= 28.5)
    mag_array1 = c1.magout[gfcrit1]
    re_array1 = c1.reout[gfcrit1]
    n_array1 = c1.nout[gfcrit1]
    mag_array2 = c2.magout[gfcrit2]
    re_array2 = c2.reout[gfcrit2]
    n_array2 = c2.nout[gfcrit2]

    mag_array = np.concatenate([mag_array1, mag_array2])
    re_array = np.concatenate([re_array1, re_array2])
    n_array = np.concatenate([n_array1, n_array2])
    plot_RLdist(mag_array,
                re_array,
                'V-dropouts',
                logR0,
                beta,
                mean_kcorr,
                color=color,
                marker=marker,
                ax=ax,
                ms=ms,
                gfband='F850LP')
    if bootstrap == True:
        p_array = bootstrap_error(mag_array, np.log10(re_array), nsamp=nsamp)
        return p_array
    else:
        return 0
Code Example #5
 def __init__(self, catalog):
      # Dropout catalog as a FITS table
      self.c = Ftable(catalog)
      # matplotlib keyword arguments
      self.histkw = {'histtype': 'step', 'lw': 1.5}
     self.errorbarkw = {
         'ms': 6,
         'capsize': 6,
         'mec': 'blue',
         'ecolor': 'blue'
     }
Code Example #6
def plot_RLdist_vdrops(cat1=catdir+'/vdrops/vdrops_gf_v3.fits',
                       cat2=catdir+'/vdrops/vdrops_udf_gf_v3.fits',
                       markers=['^','o'], ax=None, ms=8**2,
                       colors=[pms.Bright_Blue,pms.Bright_Red]):
   c1 = Ftable(cat1)
   c2 = Ftable(cat2)
   gfcrit1 = (c1.f850lp_gfflag==True)&(c1.magout<=26.5)
   gfcrit2 = (c2.f850lp_gfflag==True)&(c2.magout<=28.5)
   mag_array1 = c1.magout[gfcrit1]
   re_array1 = c1.reout[gfcrit1]
   n_array1 = c1.nout[gfcrit1]
   mag_array2 = c2.magout[gfcrit2]
   re_array2 = c2.reout[gfcrit2]
   n_array2 = c2.nout[gfcrit2]

   mag_array = np.concatenate([mag_array1, mag_array2])
   re_array = np.concatenate([re_array1, re_array2])
   n_array = np.concatenate([n_array1, n_array2])
   plot_RLdist(mag_array, re_array, n_array, 'V-dropouts', colors=colors,
               markers=markers, ax=ax, ms=ms, gfband='F850LP')
Code Example #7
def mergeoutput_gfsim(root,bands,output_root='merged_gfsim',
                      exclude_columns=['filename']):
	""" 
	Make one merged catalog for each band.
	exclude_columns: a kludge to skip certain columns that do not exist in all
	runs. This should not have happened in the first place...
	"""
	timestr = time.strftime('%y%m%d')
	for b in bands:
		broot = root+'_'+b   # the directory of the given band
		mcats = glob.glob(broot+'/gfit*_matched.fits')
		# print "mcats[0]", mcats[0]
		c0 = Ftable(mcats[0])
		# Keep only the columns that are not excluded; filter the names and
		# formats together so they stay aligned (and avoid removing items from
		# a list while iterating over it)
		keep = [i for i in range(len(c0.Columns)) if c0.Columns[i] not in exclude_columns]
		colnames = [c0.Columns[i] for i in keep]
		colformats = [c0.d.formats[i] for i in keep]
		colarrays_dic = {}
		# initialize colarrays
		for i in range(len(colnames)):
			col = colnames[i]
			colarrays_dic[col] = getattr(c0,col)
		# Now concatenate columns from all catalogs
		for i in range(1,len(mcats)):   # we already added the first one
			ci = Ftable(mcats[i])
			# print mcats[i]
			for col in colnames:
				# print len(colarrays_dic[col]), len(getattr(ci, col)), col
				colarrays_dic[col] = N.concatenate([colarrays_dic[col],getattr(ci,col)])
		# Now write the output catalog
		outname = output_root+'_'+b+'_'+timestr+'.fits'
		columns = []
		for i in range(len(colnames)):
			columns += [pyfits.Column(name=colnames[i],format=colformats[i],
			                          array=colarrays_dic[colnames[i]])]
		ColDefs = pyfits.ColDefs(columns)
		tbhdu = pyfits.new_table(ColDefs)
		tbhdu.writeto(outname)
		print "%s done." % outname
Code Example #8
	def __call__(self,simcatalog,field,plot_diag=False,ktest=None,
		bands=['acs_f435w','acs_f606w','acs_f775w','acs_f850lp'],
		sn_lolim={'acs_f850lp':5.0},
		sn_hilim={},
		interpolate=False,dznew=0.1):
		"""
		simcatalog --- the simulation catalog as a FITS table.
		"""
		# Initialize i-dropout color criteria
		self.lcc = lcc.colorcrit(lib=lcc.cc_library)
		self.lcc = self.lcc('b','g04')  
		# Use Giavalisco et al. 2004 criteria
		c = Ftable(simcatalog)
		# Now initialize the attributes
		for b in bands:
			bs = filter_dic[b]  # short name of filter b
			flux_iso = getattr(c,'%s_flux_iso'%bs)
			fluxerr_iso = getattr(c,'%s_fluxerr_iso'%bs)
			mag_iso = getattr(c,'%s_mag_iso'%bs)
			# calculate 1-sigma upper limits on sources with S/N < 1
			mag_iso = N.where(flux_iso/fluxerr_iso>1.0,mag_iso,zeropoints[b]-2.5*N.log10(fluxerr_iso))
			setattr(self,b+'_mag',mag_iso)  
			# MAG_ISO --> for color calculation
			setattr(self,b+'_flux',getattr(c,'%s_flux_auto'%bs))  
			# FLUX_AUTO
			setattr(self,b+'_fluxerr',getattr(c,'%s_fluxerr_auto'%bs))  
			# FLUXERR_AUTO
			setattr(self,b+'_sn',
				getattr(self,'%s_flux'%b)/getattr(self,'%s_fluxerr'%b))  
			# S/N calculated using FLUX_AUTO
		# Now construct S/N criteria
		self.sncrit = N.ones(len(c.d),'bool')  
		for b in sn_lolim.keys():  # enforce S/N lower limits
			self.sncrit = self.sncrit & (getattr(self,b+'_sn')>=sn_lolim[b])
		for b in sn_hilim.keys():  # enforce S/N upper limits (veto bands)
			self.sncrit = self.sncrit & (getattr(self,b+'_sn')<sn_hilim[b])
		print "Total number of objects satisfying the S/N criteria:", \
			sum(self.sncrit)
		print "Do selections..."
		self.color1 = self.acs_f435w_mag-self.acs_f606w_mag
		self.color2 = self.acs_f606w_mag-self.acs_f850lp_mag
		self.lcc.select(self.color1,self.color2)  # do color selection!
		self.lcc.crit = self.lcc.crit & self.sncrit  # enforce S/N criteria
		self.dropcrit = self.lcc.crit & (c.detect==True)  # just in case
		self.detect = c.detect.copy()
		print "Selection done."
		print "Total number of objects in the catalog: %d" % len(c.d)
		print "Total number selected as B-dropouts: %d" % (sum(self.dropcrit))

		# Now invoke zdist.zdgrid.__call__ to calculate P(z)
		zdist.zdgrid.__call__(self,c,interpolate=interpolate,dznew=dznew,
			plot_diag=plot_diag,ktest=ktest)
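The 1-sigma upper-limit substitution used above (keep MAG_ISO when S/N > 1, otherwise use zeropoint - 2.5*log10(fluxerr)) in a small, self-contained sketch with hypothetical numbers:

import numpy as np

zeropoint = 25.96                        # example AB zeropoint (assumed)
flux = np.array([10.0, 0.5])
fluxerr = np.array([1.0, 1.0])
mag = np.array([23.5, 26.2])             # catalog MAG_ISO values (hypothetical)

mag_used = np.where(flux / fluxerr > 1.0, mag,
                    zeropoint - 2.5 * np.log10(fluxerr))
print(mag_used)                          # -> [23.5  25.96]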
Code Example #9
 def add_filternames(self, f):
     # prepend the FITS table columns by filter name
     print "Adding filter names to the columns..."
     c = Ftable(self.sex_fitstable[f])
     old_colnames = []
     new_colnames = []
     for col in c.Columns:
         if col.lower() in self.fixcols:
             pass
         else:
             old_colnames += [col]
             new_colnames += ['%s_%s' % (f, col)]
     fitsutil.change_column_names(self.sex_fitstable[f], old_colnames,
                                  new_colnames)
Code Example #10
 def __init__(self,
              catalog,
              field,
              filtername,
              z0,
              maglimits,
              logrlimits,
              good_vflags=[0, 1, 2, 3, 12],
              colnames=colnames_default,
              re_err_lim=0.6,
              chi2nu_lim=5.0,
              sexmaglimit=None):
     self.c = Ftable(catalog)
     self.field = field
     self.z0 = z0
     self.colnames = colnames
     self.re_err_lim = re_err_lim
     self.chi2nu_lim = chi2nu_lim
     self.filtername = filtername
     self.maglimits = maglimits
     # if sexmaglimit != None, we will include galaxies fainter than the
     # magnitude limit where GALFIT is still reliable... we include these
     # faint galaxies in order to better constrain the faint-end slope. This
     # is desirable when the GALFIT magnitude limit is not much fainter than
     # L*, e.g., for i-dropouts. In this case, we do not use the size
     # information at all for these faint galaxies.
     self.sexmaglimit = sexmaglimit
     self.logrlimits = logrlimits
     self.gfsample = np.logical_and(self.check_goodfit(),
                                    self.check_within())
     self.gfsample = np.logical_and(self.gfsample,
                                    self.check_vflag(good_vflags))
     # self.mag, self.logr only include those well-fit by GALFIT and those
     # within maglimits and logrlimits
     self.mag = self.get_col('mag')[self.gfsample]
     self.logr = np.log10(self.get_col('re')[self.gfsample])
     if sexmaglimit == None:
         self.sexsample = np.zeros(len(self.c.d), 'bool')
         self.mag_auto = np.array([])
     else:
         self.sexsample = self.check_within(
             magcol='mag_auto',
             recol='r_hl',
             maglimits=[maglimits[1], sexmaglimit])
         self.mag_auto = self.get_col('mag_auto')[self.sexsample]
     self.data = np.array([self.mag, self.logr]).T  # it is an N by 2 array
Code Example #11
 def __init__(self, catalog, column_names=colnames_default, disk=0, 
              diskfrac=0.7, re_err_lim=re_err_lim, chi2nu_lim=5.0, 
              shape='gaussian', expand=[0.2, 0.1], dx=0.02, dy=0.02, 
              xmax=2.56, ymax=2.56, xunit='dmag', yunit='dlogR', 
              xname=r'$\Delta$ mag', yname=r'$\Delta\log R$', Ntot=None):
    # More of a template; don't expect to call this constructor directly
    # dx, dy: pixel size of the kernels (in kernel units)
    # xmax, ymax: extent of kernel
    assert catalog.endswith('.fits'), "Please provide a FITS table as the catalog."
    self.c = Ftable(catalog)
    self.catalog = catalog
    self.column_names = column_names
    self.re_err_lim = re_err_lim
    self.chi2nu_lim = chi2nu_lim 
    self.shape = shape
    assert shape in KPDF_func.keys(), "Kernel shape should be either 'gaussian' or 'epanechnikov'."
    self.expand = expand
    print "self.expand = ", self.expand
    assert disk in [0, 1], "disk should be either 0 or 1."
    self.disk = disk
    if disk == 0:
       self.devauc = 1
    else:
       self.devauc = 0
    self.diskfrac = diskfrac
    # self.Ntot will be the total number of galaxies used from the simulation
    # catalog, after drawing a specified fraction of disks
    if Ntot==None:
       self.Ntot = len(self.c.d)
       self.outindex = np.arange(len(self.c.d))
    else:
       self.Ntot = Ntot
       self.outindex = self.draw_diskfrac()
    print "self.outindex = ", self.outindex
    # Below are some basic kernel properties
    self.dx = dx  # pixel size for kernels, NOT bin width
    self.dy = dy
    self.xlimits = [-xmax/2., xmax/2.]  # limits of each kernel, centered around zero
    self.ylimits = [-ymax/2., ymax/2.]
    self.xunit = xunit
    self.yunit = yunit
    self.xname = xname  # include LaTeX syntax for use in plots
    self.yname = yname
    self._check_goodfit = False
    self.goodfit = self.check_goodfit()
Code Example #12
File: plot_gfsim.py  Project: albertfxwang/mypytools
 def __init__(self, catalog):
     self.c = Ftable(catalog)
     self.gfmeasure = (self.c.recovered_galfit == 1) & (
         self.c.re_out_err / self.c.re_out <=
         0.6) & (self.c.mag_auto < 99.0) & (self.c.magerr_auto > 0.)
     self.dmag = (self.c.mag_out - self.c.mag_in)[self.gfmeasure]
     self.dlogRe = (N.log10(self.c.re_out) -
                    N.log10(self.c.re_in))[self.gfmeasure]
     self.mag_auto = self.c.mag_auto[self.gfmeasure]
     self.magerr_auto = self.c.magerr_auto[self.gfmeasure]
     self.SN_auto = (1.0857 / self.c.magerr_auto)[self.gfmeasure]
     self.mag_in = (self.c.mag_in)[self.gfmeasure]
     self.re_in = (self.c.re_in)[self.gfmeasure]
     self.mag_out = (self.c.mag_out)[self.gfmeasure]
     self.re_out = (self.c.re_out)[self.gfmeasure]
     self.robust = False
     self.fig = None
     self.ax = None
Code Example #13
def calc_sex_transfunc_1d(simcat,
                          kgridname,
                          limits=limits,
                          field='udf',
                          logre_lims=[0.0, 1.5],
                          dm=0.5):
    """
   Calculate the magnitude transfer functions for each apparent magnitude bin
   for the faint-end of i-dropouts.
   logre_lims are the limits of log(Re) (in pixels) that are used from 
   the simulation catalog. This is to match the observed range of log(Re)
   of i-dropouts at the faint-end in UDF.
   """
    # Read simulation catalog
    c = Ftable(simcatdir + '/' + simcat)
    y_mag_auto = c.y_mag_auto
    y_mag_in = c.y_mag_in
    kw = {'logre_lims': logre_lims, 'm0_arr': m0_arr}
    kgrid = kernelgrid_1d(limits, dm, pixdx=pixdx, xmax=xmax, **kw)
    kgrid.grid1d = np.arange(-xmax / 2., xmax / 2. + pixdx, pixdx)
    logre_crit = (np.log10(c.y_flux_radius_1)>=logre_lims[0]) & \
                 (np.log10(c.y_flux_radius_1)<logre_lims[1]) & \
                 (c.detect==True)

    for m in m0_arr:
        print "m=", m, m + kgrid.binwidth
        bincrit = (y_mag_in >= m) & (y_mag_in < (m + kgrid.binwidth))
        y_mag_in_bin = y_mag_in[(bincrit == True) & (logre_crit == True)]
        y_mag_auto_bin = y_mag_auto[(bincrit == True) & (logre_crit == True)]
        dmag = y_mag_auto_bin - y_mag_in_bin
        h = KPDF.UPDFOptimumBandwidth(dmag)
        pdf = KPDF.UPDFEpanechnikov(dmag, kgrid.grid1d, h)
        pdf = pdf / sum(pdf)  # normalize to 1.0
        kgrid.kernels['%.1f' % m] = kernel1d(m,
                                             m + kgrid.binwidth,
                                             pdf,
                                             xmax=xmax,
                                             pixdx=pixdx)

    # Write kgrid into a pickled file
    f = open(kgridname, 'wb')
    cPickle.dump(kgrid, f, 2)
    f.close()
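The core per-bin transfer-function step above, isolated in a short sketch; it assumes the same KPDF module (UPDFOptimumBandwidth / UPDFEpanechnikov) and uses synthetic dmag values in place of a real magnitude bin:

import numpy as np
import KPDF

dmag = np.random.normal(0.0, 0.2, size=500)    # mag_auto - mag_in within one input-magnitude bin
grid = np.arange(-2.56 / 2., 2.56 / 2. + 0.02, 0.02)

h = KPDF.UPDFOptimumBandwidth(dmag)            # data-driven bandwidth
pdf = KPDF.UPDFEpanechnikov(dmag, grid, h)     # Epanechnikov KDE evaluated on the grid
pdf = pdf / pdf.sum()                          # normalize the kernel to sum to 1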
Code Example #14
 def drawUMag(self):
     # Draw U-band magnitude from probability distribution functions calculated
     # using TFIT simulation catalogs in VIMOS U-band.
      # In principle, we draw an output U-band magnitude given the U-band input
     # magnitudes to simulate photometric errors. We do it this way because
     # running enough simulations with TFIT is prohibitively expensive.
     cu = Ftable(tfitsim_cat[self.field])
     # mag0 = 22.0
     umag_in = cu.uvimos_mag_in[cu.uvimos_fitquerr > 0.]
     print "Repeat the drawing process %d times" % self.n_repeat
     vimos_u_mag = uu.draw_umag(np.tile(self.c.u_mag_in, self.n_repeat),
                                self.umag_kpdf,
                                mag0=self.mag0)
     vimos_u_mag_1sig = uu.draw_ulimmag(
         self.n_repeat * len(self.c.u_mag_in), self.umag_1sig_kpdf)
     # If the drawn U-band 1-sigma limit mag is brighter than the drawn
     # U-band magnitude, use the 1-sigma limit magnitude as the measured
     # U-band magnitude.
     self._drawn_umag = True
     return np.minimum(vimos_u_mag, vimos_u_mag_1sig)
Code Example #15
def calc_umag_pdf(tfitsimcatalog,
                  umag_array=arange(21., 35.0, 0.5),
                  pdfname='uvimos_mag_pdf.p',
                  b_snlim=3.0):
    """
   Calculate the PDF of the output magnitude around the
   input U-band magnitude when S/N >= 1 in the U band.
   """
    c = Ftable(tfitsimcatalog)
    include = (c.uvimos_mag_out>0.)&(c.detect==True)&(c.uvimos_mag_out<99.)&\
              (c.f435w_magerr_iso < (1.0857/b_snlim)) & \
              (c.uvimos_fitqty > 0.)
    print "sum(include)=", sum(include)
    umag_in = c.uvimos_mag_in[include == True]
    umag_out = c.uvimos_mag_out[include == True]
    dmag_array = arange(-10., 6.05, 0.05)  # x-coordinates of the PDF
    dumag = umag_array[1] - umag_array[
        0]  # the bin width of U-band input magnitude
    kpdf_umag = {}
    kpdf_umag['h'] = {}
    kpdf_umag['dmag_array'] = dmag_array
    for umag in umag_array[:-1]:
        mag0 = umag
        mag1 = mag0 + dumag
        print mag0, mag1
        bincrit = (umag_in >= mag0) & (umag_in < mag1)
        mag_in_bin = umag_in[bincrit]
        mag_out_bin = umag_out[bincrit]
        if sum(bincrit) >= 5:
            h = KPDF.UPDFOptimumBandwidth(mag_out_bin)
            kpdf_umag['h']['%.1f' % mag0] = h
            pdf = KPDF.UPDFEpanechnikov((mag_out_bin - mag_in_bin), dmag_array,
                                        h)
        else:
            pdf = ones(len(dmag_array))
            pdf = pdf / sum(pdf)
        kpdf_umag['%.1f' % mag0] = pdf
    f = open(pdfname, 'wb')
    cPickle.dump(kpdf_umag, f, 2)
    f.close()
Code Example #16
 def __init__(self,
              catalog,
              column_names=colnames_sex_default,
              disk=0,
              diskfrac=0.7,
              expand=0.0,
              dx=0.02,
              xmax=2.56,
              xunit='dmag',
              xname=r'$\Delta$ mag',
              Ntot=None):
     assert catalog.endswith(
         '.fits'), "Please provide a FITS table as the catalog."
     self.c = Ftable(catalog)
     self.catalog = catalog
     self.column_names = column_names
     assert disk in [0, 1], "disk should be either 0 or 1."
     self.disk = disk
     if disk == 0:
         self.devauc = 1
     else:
         self.devauc = 0
     self.diskfrac = diskfrac
     self.expand = expand
     # self.Ntot will be the total number of galaxies used from the simulation
     # catalog, after drawing a specified fraction of disks
     if Ntot == None:
         self.Ntot = len(self.c.d)
     else:
         self.Ntot = Ntot
     self.outindex = self.draw_diskfrac()
     # Below are some basic kernel properties
     self.dx = dx  # pixel size for kernels, NOT bin width
      self.xlimits = [-xmax / 2., xmax / 2.]  # limits of each kernel, centered around zero
     self.xunit = xunit
      self.xname = xname  # include LaTeX syntax for use in plots
Code Example #17
 def __init__(self,
              catalog,
              z0,
              zlo,
              zhi,
              dz=0.1,
              interpolate=True,
              interp_dz=0.02,
              n_repeat=1,
              expand=[0., 0.],
              cosmo=cosmo_default,
              mag_in='m1500_in',
              re_in='re_in_arcsec',
              zeropoints=zeropoints):
     # More of a template; don't expect to call this constructor directly
     assert catalog.endswith(
         '.fits'), "Please provide a FITS table as the catalog."
     self.c = Ftable(catalog)
     self.mag_in_col = mag_in
     self.re_in_col = re_in
     self.zeropoints = zeropoints
      assert hasattr(
          self.c,
          self.mag_in_col), "Column %s not in catalog." % self.mag_in_col
     assert hasattr(
         self.c,
         self.re_in_col), "Column %s not in catalog." % self.re_in_col
     self.z0 = z0  # the nominal redshift of the LBG sample
     self.zlimits = [zlo, zhi]
     self.dz = dz
     self.Nz = int(round((zhi - zlo) / dz))
     self.interpolate = interpolate
     self.interp_dz = interp_dz
     self.n_repeat = n_repeat
     self.expand = expand
     self.cosmo = cosmo
     self.bands = []  # will be defined by subclass
Code Example #18
def calc_ulimmag_pdf(tfitsimcatalog,
                     pdfname,
                     limmag_array=arange(26., 31., 0.02)):
    c = Ftable(tfitsimcatalog)
    uvimos_1sig_mag = uvimos_magzero - 2.5 * log10(c.uvimos_fitquerr)
    umag_crit = uvimos_1sig_mag > 0
    uvimos_1sig_mag = uvimos_1sig_mag[umag_crit]
    kpdf_ulimmag = {}
    kpdf_ulimmag['dmag_array'] = limmag_array
    h = KPDF.UPDFOptimumBandwidth(uvimos_1sig_mag)
    kpdf_ulimmag['h'] = h
    pdf = KPDF.UPDFEpanechnikov(uvimos_1sig_mag, limmag_array, h)
    kpdf_ulimmag['pdf'] = pdf
    # Also calculate the fraction that have S/N < 1
    dlimmag = limmag_array[1] - limmag_array[0]
    limmag_bins = concatenate([limmag_array, [limmag_array[-1] + dlimmag]])
    uvimos_ston = (c.uvimos_fitqty / c.uvimos_fitquerr)[umag_crit]
    # n1 = histogram(uvimos_1sig_mag[uvimos_ston<1.], limmag_bins)[0]
    # n2 = histogram(uvimos_1sig_mag, limmag_bins)[0]
    # limfrac = n1.astype('float') / maximum(1.,n2.astype('float'))
    # kpdf_ulimmag['limfrac'] = limfrac
    f = open(pdfname, 'wb')
    cPickle.dump(kpdf_ulimmag, f, 2)
    f.close()
Code Example #19
 def __init__(self, simcatalog):
     self.c = Ftable(simcatalog)
Code Example #20
    def __call__(self,
                 simcatalog,
                 field,
                 plot_diag=False,
                 ktest=None,
                 bands=def_bands,
                 key='Hua13',
                 sn_lolim={
                     'wfc3_f160w': 5.0,
                     'wfc3_f125w': 3.5,
                     'acs_f850lp': 2.0
                 },
                 sn_hilim={'acs_f435w': 2.0},
                 interpolate=False,
                 dznew=0.1,
                 expand=[0, 0]):
        """
      simcatalog --- the simulation catalog as a FITS table.
      """
        # Initialize i-dropout color criteria
        print "dznew=", dznew
        print "interpolate?", interpolate
        self.lcc = lcc.colorcrit()
        self.lcc = self.lcc('f775w_drop', key)
        c = Ftable(simcatalog)
        # Now initialize the attributes
        for b in bands:
            bs = filter_dic[b]  # short name of filter b
            flux_iso = getattr(c, '%s_flux_iso' % bs)
            fluxerr_iso = getattr(c, '%s_fluxerr_iso' % bs)
            mag_iso = getattr(c, '%s_mag_iso' % bs)
            # calculate 1-sigma upper limits on sources with S/N < 1
            mag_iso = N.where(flux_iso / fluxerr_iso > 1.0, mag_iso,
                              zeropoints[b] - 2.5 * N.log10(fluxerr_iso))
            setattr(self, b + '_mag', mag_iso)
            # MAG_ISO --> for color calculation
            setattr(self, b + '_flux', getattr(c, '%s_flux_auto' % bs))
            # FLUX_AUTO
            setattr(self, b + '_flux_aper', getattr(c, '%s_flux_aper_2' % bs))
            # Aperture flux within 0.18" aperture (2.94 pixels)
            setattr(self, b + '_fluxerr', getattr(c, '%s_fluxerr_auto' % bs))
            # FLUXERR_AUTO
            setattr(self, b + '_fluxerr_aper',
                    getattr(c, '%s_fluxerr_aper_2' % bs))
            # Aperture flux errors within 0.18" aperture
            setattr(
                self, b + '_sn',
                getattr(self, '%s_flux' % b) / getattr(self, '%s_fluxerr' % b))
            # S/N calculated using AUTO APERTURE
            setattr(
                self, b + '_sn_aper',
                getattr(self, '%s_flux_aper' % b) /
                getattr(self, '%s_fluxerr_aper' % b))
            # S/N calculated using 0.18" APERTURE
        # Now construct S/N criteria
        #self.sncrit = N.ones(len(c.d),'bool')
        self.sn_lolim_crit = N.ones(len(c.d), 'bool')
        self.sn_hilim_crit = N.ones(len(c.d), 'bool')
        for b in sn_lolim.keys():  # enforce S/N lower limits
            self.sn_lolim_crit = self.sn_lolim_crit & \
                  (getattr(self,b+'_sn')>=sn_lolim[b])
        for b in sn_hilim.keys():
            # enforce S/N upper limits (veto bands)
            # but use fixed aperture S/N
            self.sn_hilim_crit = self.sn_hilim_crit & \
                  (getattr(self,b+'_sn_aper')<sn_hilim[b])
        print "Total number of objects satisfying the S/N criteria:", \
           sum((self.sn_hilim_crit)&(self.sn_lolim_crit))
        self.sncrit = (self.sn_hilim_crit == True) & (self.sn_lolim_crit
                                                      == True)
        print "Do selections..."
        self.color1 = self.acs_f775w_mag - self.acs_f850lp_mag
        self.color2 = self.acs_f850lp_mag - self.wfc3_f125w_mag
        self.lcc.select(self.color1, self.color2)  # do color selection!
        self.colorcrit = self.lcc.crit.copy()
        self.lcc.crit = self.lcc.crit & self.sncrit  # enforce S/N criteria
        self.dropcrit = self.lcc.crit & (c.detect == True)  # just in case
        self.detect = c.detect.copy()
        print "Selection done."
        print "Total number of objects in the catalog: %d" % len(c.d)
        print "Total number selected as i-dropouts: %d" % (sum(self.dropcrit))

        # Now invoke zdist.zdgrid.__call__ to calculate P(z)
        zdist.zdgrid.__call__(self,
                              c,
                              interpolate=interpolate,
                              dznew=dznew,
                              plot_diag=plot_diag,
                              ktest=ktest,
                              expand=expand)
Code Example #21
 def __call__(self,
              simcatalog,
              bands=def_bands,
              key='Hua13',
              sn_lolim={
                  'wfc3_f160w': 5.0,
                  'wfc3_f125w': 3.5,
                  'acs_f850lp': 2.0
              },
              sn_hilim={'acs_f435w': 2.0},
              expand=[0., 0.]):
     ## Most stuff is the same as the 2D case:
     self.lcc = lcc.colorcrit()
     self.lcc = self.lcc('f775w_drop', key)
     c = Ftable(simcatalog)
     # Now initialize the attributes
     for b in bands:
         bs = filter_dic[b]  # short name of filter b
         flux_iso = getattr(c, '%s_flux_iso' % bs)
         fluxerr_iso = getattr(c, '%s_fluxerr_iso' % bs)
         mag_iso = getattr(c, '%s_mag_iso' % bs)
         # calculate 1-sigma upper limits on sources with S/N < 1
         mag_iso = N.where(flux_iso / fluxerr_iso > 1.0, mag_iso,
                           zeropoints[b] - 2.5 * N.log10(fluxerr_iso))
         setattr(self, b + '_mag', mag_iso)
         # MAG_ISO --> for color calculation
         setattr(self, b + '_flux', getattr(c, '%s_flux_auto' % bs))
         # FLUX_AUTO
         setattr(self, b + '_flux_aper', getattr(c, '%s_flux_aper_2' % bs))
         # Aperture flux within 0.18" aperture (2.94 pixels)
         setattr(self, b + '_fluxerr', getattr(c, '%s_fluxerr_auto' % bs))
         # FLUXERR_AUTO
         setattr(self, b + '_fluxerr_aper',
                 getattr(c, '%s_fluxerr_aper_2' % bs))
         # Aperture flux errors within 0.18" aperture
         setattr(
             self, b + '_sn',
             getattr(self, '%s_flux' % b) / getattr(self, '%s_fluxerr' % b))
         # S/N calculated using AUTO APERTURE
         setattr(
             self, b + '_sn_aper',
             getattr(self, '%s_flux_aper' % b) /
             getattr(self, '%s_fluxerr_aper' % b))
         # S/N calculated using 0.18" APERTURE
     # Now construct S/N criteria
     #self.sncrit = N.ones(len(c.d),'bool')
     self.sn_lolim_crit = N.ones(len(c.d), 'bool')
     self.sn_hilim_crit = N.ones(len(c.d), 'bool')
     for b in sn_lolim.keys():  # enforce S/N lower limits
         self.sn_lolim_crit = self.sn_lolim_crit & \
               (getattr(self,b+'_sn')>=sn_lolim[b])
     for b in sn_hilim.keys():
         # enforce S/N upper limits (veto bands)
         # but use fixed aperture S/N
         self.sn_hilim_crit = self.sn_hilim_crit & \
               (getattr(self,b+'_sn_aper')<sn_hilim[b])
     print "Total number of objects satisfying the S/N criteria:", \
        sum((self.sn_hilim_crit)&(self.sn_lolim_crit))
     self.sncrit = (self.sn_hilim_crit == True) & (self.sn_lolim_crit
                                                   == True)
     print "Do selections..."
     self.color1 = self.acs_f775w_mag - self.acs_f850lp_mag
     self.color2 = self.acs_f850lp_mag - self.wfc3_f125w_mag
     self.lcc.select(self.color1, self.color2)  # do color selection!
     self.colorcrit = self.lcc.crit.copy()
     self.lcc.crit = self.lcc.crit & self.sncrit  # enforce S/N criteria
     self.dropcrit = self.lcc.crit & (c.detect == True)  # just in case
     self.detect = c.detect.copy()
     print "Selection done."
     print "Total number of objects in the catalog: %d" % len(c.d)
     print "Total number selected as i-dropouts: %d" % (sum(self.dropcrit))
     for M in self.M1500arr:
         # For each magnitude bin, calculate completeness P(z)
         bincrit = (c.m1500_in >= M) & (c.m1500_in < (M + self.dM))
         z_in_bin = c.z_in[bincrit == True]
         z_in_bin_detect = c.z_in[(bincrit == True) & (c.detect == True)]
         zhist_bin_input = N.histogram(z_in_bin, self.zarr_edges)[0]
         zhist_bin_detect = N.histogram(z_in_bin_detect, self.zarr_edges)[0]
         self.zdist['%.1f'%M] = zhist_bin_detect.astype('float') / \
               N.maximum(zhist_bin_input.astype('float'), 1.0)
         self.zdist['%.1f' % M] = self.zdist['%.1f' % M]
Code Example #22
#!/usr/bin/env python

from numpy import *
import udropsim_uflux as uu
from pygoods import Ftable
import fitsutil
import os

rootdir = '/Users/khuang/Dropbox/Research/bivariate/udrops_fitting'
rootcat = rootdir + 'udropsim_run2m_goodss_deep_130505.fits'
c = Ftable(rootcat)


def draw_umags(niter=1):
    for i in range(niter):
        vimos_u_mag = uu.draw_umag(c.u_mag_in,
                                   'simcatalogs/udrops/uvimos_mag_pdf.p')
        vimos_u_mag_1sig = uu.draw_umag(
            c.u_mag_in,
            'simcatalogs/udrops/uvimos_mag_1sigma_pdf.p',
            onesigma=True)
        vimos_u_mag = minimum(vimos_u_mag, vimos_u_mag_1sig)
        os.system('cp %s udropsim_umag_drawn_%d.fits' % (rootcat, i))
        fitsutil.add_columns('udropsim_umag_drawn_%d.fits' % i,
                             ['vimos_u_mag_out'], [vimos_u_mag], ['D'])
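The copy-then-append-column step at the end of draw_umags, sketched with astropy.table instead of the fitsutil helper used above; the file name and the drawn values are hypothetical stand-ins:

import numpy as np
from astropy.table import Table

t = Table.read('udropsim_umag_drawn_0.fits')
t['vimos_u_mag_out'] = np.random.normal(27.0, 0.5, size=len(t))  # stand-in for the drawn magnitudes
t.write('udropsim_umag_drawn_0.fits', overwrite=True)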
Code Example #23
    def __call__(self,
                 simcatalog,
                 field,
                 plot_diag=False,
                 ktest=None,
                 n_repeat=1,
                 sn_lolim={
                     'wfc3_f160w': 5.0,
                     'wfc3_f105w': 5.0,
                     'acs_f435w': 3.0
                 },
                 mode='ubvy',
                 interpolate=False,
                 dznew=0.1,
                 redraw=False,
                 drawnflux=None,
                 testumag=False,
                 testdraw=False,
                 mag0=22.0,
                 expand=[0., 0.],
                 mag_in_col='m1500_in',
                 re_in_col='re_in'):
        """
      simcatalog --- the simulation catalog as a FITS table.
      Because the U-band magnitudes are randomly drawn for each object, we
      repeat the drawing process a few times to average out any statistical
      anomalies.
      mode --- the bands to use for selection (uby vs. ubvy)
      """
        # Initialize U-dropout color criteria
        print "n_repeat", n_repeat
        print "dznew", dznew
        print "interpolate?", interpolate
        print "Redraw?", redraw
        self.lcc = lcc.colorcrit()
        if mode == 'uby':
            if field == 'ers':
                self.lcc = self.lcc('uvimos_drop', 'UBY098')
            else:
                self.lcc = self.lcc('uvimos_drop', 'UBY105')
        elif mode == 'ubvy':
            if field == 'ers':
                self.lcc = self.lcc('uvimos_drop', 'UBVY098')
            else:
                self.lcc = self.lcc('uvimos_drop', 'UBVY105')
        c = Ftable(simcatalog)
        #self.c = c
        ## First: draw U-band magnitudes, and initialize correct attributes of c
        ## Tile the arrays by `n_repeat` times
        if field == 'ers':
            bands = ['acs_f435w', 'acs_f606w', 'wfc3_f098m', 'wfc3_f160w']
            # HST bands only
        else:
            bands = ['acs_f435w', 'acs_f606w', 'wfc3_f105w', 'wfc3_f160w']
        for b in bands:
            bs = filter_dic[b]  # short name of filter b
            flux_iso = getattr(c, '%s_flux_iso' % bs)
            fluxerr_iso = getattr(c, '%s_fluxerr_iso' % bs)
            mag_iso = getattr(c, '%s_mag_iso' % bs)
            # calculate 1-sigma upper limits on sources with S/N < 1
            mag_iso = N.where((flux_iso / fluxerr_iso) > 1.0, mag_iso,
                              zeropoints[b] - 2.5 * N.log10(fluxerr_iso))
            setattr(self, b + '_mag', N.tile(mag_iso, n_repeat))
            # MAG_ISO --> for color calculation
            # setattr(self,b + '_flux',
            #               N.tile(getattr(c, '%s_flux_auto'%bs),n_repeat))
            setattr(self, b + '_flux',
                    N.tile(getattr(c, '%s_flux_iso' % bs), n_repeat))
            # FLUX_AUTO
            # setattr(self, b + '_fluxerr',
            #               N.tile(getattr(c,'%s_fluxerr_auto'%bs),n_repeat))
            setattr(self, b + '_fluxerr',
                    N.tile(getattr(c, '%s_fluxerr_iso' % bs), n_repeat))
            # FLUXERR_AUTO
            setattr(
                self, b + '_sn',
                getattr(self, '%s_flux' % b) / getattr(self, '%s_fluxerr' % b))
            # S/N calculated using FLUX_ISO

        print "Step 1: draw U-band magnitudes and 1-sigma limits from files \
            %s and %s" % (self.umag_kpdf, self.umag_1sig_kpdf)
        # Draw U-band magnitude
        # Calculate the fraction that have U-band S/N >= 1
        if field == 'udf':
            cu = Ftable(
                '/Users/khuang/Dropbox/Research/bivariate/bivariate_fit/udrops_fitting/simcatalogs/run1_udf_130625.fits'
            )
            umag_bins = N.arange(25., 38.5, 0.5)
        else:
            cu = Ftable(
                '/Users/khuang/Dropbox/Research/bivariate/bivariate_fit/udrops_fitting/simcatalogs/run2_tfitsim_130322.fits'
            )
            umag_bins = N.arange(25., 38.5, 0.5)
        umag_in = cu.uvimos_mag_in[cu.uvimos_fitquerr > 0.]
        u_ston = (cu.uvimos_fitqty /
                  cu.uvimos_fitquerr)[cu.uvimos_fitquerr > 0.]
        n1 = N.histogram(umag_in[u_ston >= 1.], umag_bins)[0]
        n2 = N.histogram(umag_in, umag_bins)[0]
        u_detect_frac = n1.astype('float') / N.maximum(1., n2.astype('float'))

        if testumag == False:
            # If not testing anything...
            if redraw == True:
                print "Repeat the drawing process %d times" % n_repeat
                self.vimos_u_mag = uu.draw_umag(N.tile(c.u_mag_in, n_repeat),
                                                self.umag_kpdf,
                                                mag0=mag0)
                self.vimos_u_mag_1sig = uu.draw_ulimmag(
                    n_repeat * len(c.u_mag_in), self.umag_1sig_kpdf)
                # Draw 1-sigma magnitude upper limit
                # Now replace U-band magnitude with 1-sigma upper limit by using
                # the S/N > 1 fraction
                # index_detfrac = (c.u_mag_in - c.u_mag_in.min()) - (c.u_mag_in % 0.5)
                # index_detfrac = N.minimum(len(u_detect_frac)-1, index_detfrac)
                # detfrac = u_detect_frac.take(index_detfrac.astype('int'))
                # # Now randomly draws a reference level to compare with the detection
                # # fraction
                # rdn_level = N.random.uniform(0., 1., size=len(c.d))
                # self.vimos_u_mag = N.where(detfrac>=rdn_level, self.vimos_u_mag,
                #                            self.vimos_u_mag_1sig)
                self.vimos_u_mag = N.minimum(self.vimos_u_mag,
                                             self.vimos_u_mag_1sig)
            else:
                print "Read U-band magnitudes from %s." % drawnflux
                # Read the drawn U-band magnitudes from file drawnflux
                df = cPickle.load(open(drawnflux))
                self.vimos_u_mag_1sig = df.vimos_u_mag_1sig
                self.vimos_u_mag = df.vimos_u_mag
        else:
            self.vimos_u_mag = c.u_mag_in
            self.vimos_u_mag_1sig = c.u_mag_in
        # Now construct S/N criteria
        #self.sncrit = N.ones(len(c.d)*n_repeat,'bool')
        self.sn_lolim_crit = N.ones(len(c.d) * n_repeat, 'bool')
        self.detect = c.detect.copy()
        #print len(self.sncrit), len(self.acs_f435w_sn)
        if len(sn_lolim.keys()) > 0:
            print "sn_lolim", sn_lolim
            for b in sn_lolim.keys():
                self.sn_lolim_crit = self.sn_lolim_crit & \
                   (getattr(self,b+'_sn')>=sn_lolim[b])
                #self.sncrit = self.sncrit & (getattr(self,'%s_sn'%b)>=sn_lolim[b])
        self.sncrit = (self.sn_lolim_crit == True)
        ## Second: calculate dropcrit (do color selection)
        #import select_udrops_uby as suu
        print "Do selections..."
        if field == 'ers':
            self.color1 = self.vimos_u_mag - self.acs_f435w_mag
            if mode == 'uby':
                self.color2 = self.acs_f435w_mag - self.wfc3_f098m_mag
            elif mode == 'ubvy':
                self.color2 = self.acs_f606w_mag - self.wfc3_f098m_mag
            self.lcc.select(self.color1, self.color2)

        else:
            self.color1 = self.vimos_u_mag - self.acs_f435w_mag
            if mode == 'uby':
                self.color2 = self.acs_f435w_mag - self.wfc3_f105w_mag
            elif mode == 'ubvy':
                self.color2 = self.acs_f606w_mag - self.wfc3_f105w_mag
            self.lcc.select(self.color1, self.color2)
        self.colorcrit = self.lcc.crit.copy()
        self.lcc.crit = self.lcc.crit & self.sncrit  # Fold in S/N criteria
        #if field=='ers':
        #   self.udrops = suu.udrops_ubvy098(self.c,
        #      bands=['acs_f435w','acs_f606w','wfc3_f098m','wfc3_f160w'],
        #      sn_lolim={'wfc3_f160w':5.0,'acs_f435w':3.0,'wfc3_f098m':5.0})
        #else:
        #   self.udrops = suu.udrops_ubvy105(self.c,
        #      bands=['acs_f435w','acs_f606w','wfc3_f105w','wfc3_f160w'],
        #      sn_lolim={'wfc3_f160w':5.0,'acs_f435w':3.0,'wfc3_f105w':5.0})
        #dropcrit = self.udrops.crit   # the dropout-selection array
        dropcrit = self.lcc.crit & (N.tile(c.detect, n_repeat) == True)
        #self.dropcrit = dropcrit[:len(c.d)]
        self.dropcrit = dropcrit
        self.detect = c.detect.copy()
        print "Selections done."
        print "Total number of objects in the catalog: %d" % len(c.d)
        print "Total number selected as U-dropouts: %d" % (sum(dropcrit) /
                                                           float(n_repeat))
        ## Third: call make_Pz to calculate P(z)

        if not testdraw:
            # Now invoke zdist.zdgrid.__call__ to calculate P(z)
            zdist.zdgrid.__call__(self,
                                  c,
                                  interpolate=interpolate,
                                  dznew=dznew,
                                  plot_diag=plot_diag,
                                  ktest=ktest,
                                  n_repeat=n_repeat,
                                  expand=expand,
                                  mag_in_col=mag_in_col,
                                  re_in_col=re_in_col)
        if self.n_repeat > 30:
            delattr(self, 'color1')
            delattr(self, 'color2')
            delattr(self, 'sncrit')
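The per-bin detection-fraction calculation used above (histogram of S/N >= 1 objects divided by the histogram of all objects, guarded against empty bins), as a small sketch with hypothetical inputs:

import numpy as np

umag_in = np.array([25.2, 25.4, 26.1, 26.3, 26.4, 27.0])
u_ston = np.array([3.0, 0.8, 1.5, 0.9, 1.2, 0.5])
umag_bins = np.arange(25., 28.5, 0.5)

n1 = np.histogram(umag_in[u_ston >= 1.], umag_bins)[0]   # objects with S/N >= 1 per bin
n2 = np.histogram(umag_in, umag_bins)[0]                  # all objects per bin
u_detect_frac = n1.astype('float') / np.maximum(1., n2.astype('float'))
print(u_detect_frac)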
Code Example #24
    def __init__(self, paramfile, filtername, **kwargs):
        # Additional keyword arguments go to GalaxySample.
        p = yaml.load(open(paramfile))
        self.filtername = filtername
        # The surveyed comoving volume in each field will be factored into the
        # respective dropout-selection kernels, so no need to include them here.
        self.guess = p['GUESS']
        (self.alpha, self.mstar, self.logr0, self.sigma,
         self.beta) = p['GUESS']
        self.phistar = 0.
        self.fields = p['FIELDS']
        self.z0 = p['Z0']  # nominal redshift of the galaxy sample
        # pixel scales of the image on which the sizes are measured
        self.pixscales = dict(zip(p['FIELDS'], p['PIXSCALES']))
        self.datafiles = dict(zip(p['FIELDS'], p['DATAFILES']))
        self.maglimits = dict(zip(p['FIELDS'], p['MAGLIMITS']))
        self.logrlimits = dict(zip(p['FIELDS'], p['LOGRLIMITS']))
        if p.has_key('SEXMAGLIMITS'):
            self.sexmaglimits = dict(zip(p['FIELDS'], p['SEXMAGLIMITS']))
            self.sex_kgrids = dict(
                zip(p['FIELDS'], map(pu.load_pickle, p['SEX_KGRIDS'])))
        else:
            self.sexmaglimits = dict(
                zip(p['FIELDS'], [None] * len(self.fields)))
            self.sex_kgrids = dict(zip(p['FIELDS'], [None] * len(self.fields)))
        self.galfit_kgrids = dict(
            zip(p['FIELDS'], map(pu.load_pickle, p['GALFIT_KGRIDS'])))
        self.re_err_lim = p['RE_ERR_LIM']
        self.chi2nu_lim = p['CHI2NU_LIM']
        self.technique = p['TECHNIQUE']
        self.magconvert = RLDist.MagConvert(p['MCFILE'])
        self.dropout_kgrids = dict(
            zip(p['FIELDS'], map(pu.load_pickle, p['DROPOUT_KGRIDS'])))
        if p['NPROC_MODEL'] > 0:
            self.nproc_model = p['NPROC_MODEL']
        else:
            self.nproc_model = multiprocessing.cpu_count()
        # interloper fraction parameters
        self.add_interloper = p['ADD_INTERLOPER']
        # self.sdfiles = dict(zip(p['FIELDS'], p['SDFILES']))
        self.size_catalog = p['SIZE_CATALOG']
        self.mag_size_catalog = p['MAG_SIZE_CATALOG']
        self.re_size_catalog = p['RE_SIZE_CATALOG']
        self.intfracfiles = dict(zip(p['FIELDS'], p['INTFRACFILES']))
        self.mag_lolims = dict(zip(p['FIELDS'], p['MAG_LOLIMS']))
        self.goodflags = p['GOOD_VFLAGS']
        if self.add_interloper:
            # Calculate the *multiplicative* interloper fractions in the RL plane
            self.sd = {}
            self.intfrac = {}
            c = Ftable(self.size_catalog)
            mag = getattr(c, self.mag_size_catalog)
            re = getattr(c, self.re_size_catalog)
            for f in self.fields:
                # first calculate the size distribution for all sources
                sd_f = ID.InterloperSizeDist(self.maglimits[f],
                                             self.logrlimits[f], 0.02, 0.02,
                                             0.5, 0.2, f)
                x = sd_f.compute(mag, re)
                self.sd[f] = sd_f
                # then calculate the multiplicative interloper distribution
                intdist = ID.InterloperRLDist(self.maglimits[f],
                                              self.logrlimits[f],
                                              0.02,
                                              0.02,
                                              f,
                                              mag_lolim=self.mag_lolims[f])
                intfrac = Ftable(self.intfracfiles[f])
                x = intdist.compute(intfrac, sd_f)
                self.intfrac[f] = intdist

        ### MCMC parameters?
        ### Define a separate class for MCMC stuff...
        self.galaxy_samples = {}
        self.p = p
        # define galaxy samples
        for f in p['FIELDS']:
            self.galaxy_samples[f] = GalaxySample(
                self.datafiles[f],
                f,
                self.filtername,
                self.z0,
                self.maglimits[f],
                self.logrlimits[f],
                re_err_lim=self.re_err_lim,
                chi2nu_lim=self.chi2nu_lim,
                good_vflags=self.goodflags,
                sexmaglimit=self.sexmaglimits[f],
                **kwargs)
            print "Total number of galaxies in %s used in likelihood calculation: %d" % (
                f, len(self.galaxy_samples[f].mag))
        mag, logr = self.combine_samples(
            flist=self.fields)  # to record attributes self.mag, self.logr
        self.mag = mag
        self.logr = logr
        self.Ntot = len(self.mag)
        # Now builds RLDist factories
        self.RLDist_factories = {}
        factory = RLDist.RLDistributionFactory
        # build RL Distribution factories for each field
        for f in self.fields:
            if self.sexmaglimits[f] == None:
                maglimits = self.maglimits[f]
            else:
                maglimits = [self.maglimits[f][0], self.sexmaglimits[f]]
            self.RLDist_factories[f] = factory(maglimits, self.logrlimits[f],
                                               self.magconvert)
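For reference, a hypothetical sketch of the YAML parameter file this constructor expects; the keys are taken from the code above, but every value and file name is made up for illustration:

import yaml

params = {
    'GUESS': [-1.7, -20.5, 0.8, 0.7, 0.25],    # alpha, M*, logR0, sigma, beta
    'FIELDS': ['deep', 'udf'],
    'Z0': 4.0,                                 # nominal redshift of the sample
    'PIXSCALES': [0.06, 0.03],
    'DATAFILES': ['deep_galfit.fits', 'udf_galfit.fits'],
    'MAGLIMITS': [[24.0, 26.5], [25.0, 28.5]],
    'LOGRLIMITS': [[-0.5, 1.5], [-0.5, 1.5]],
    'GALFIT_KGRIDS': ['kgrid_deep.p', 'kgrid_udf.p'],
    'DROPOUT_KGRIDS': ['zdgrid_deep.p', 'zdgrid_udf.p'],
    'RE_ERR_LIM': 0.6,
    'CHI2NU_LIM': 5.0,
    'TECHNIQUE': 'mlfit',
    'MCFILE': 'kcorr/M1500_to_f775w.txt',
    'NPROC_MODEL': 0,                          # 0 -> use multiprocessing.cpu_count()
    'ADD_INTERLOPER': False,
    'SIZE_CATALOG': 'sizes.fits',
    'MAG_SIZE_CATALOG': 'mag_auto',
    'RE_SIZE_CATALOG': 'flux_radius',
    'INTFRACFILES': ['intfrac_deep.fits', 'intfrac_udf.fits'],
    'MAG_LOLIMS': [24.0, 25.0],
    'GOOD_VFLAGS': [0, 1, 2, 3, 12],
}
with open('fit_params_example.yaml', 'w') as f:
    yaml.safe_dump(params, f)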
Code Example #25
File: plot_RLdist.py  Project: albertfxwang/mypytools
def plot_RLdist_idrops(logR0,
                       beta,
                       cat=catdir +
                       '/idrops/idrops_goodss_130623_vflags_galfit.fits',
                       marker='o',
                       ax=None,
                       ms=8**2,
                       color=pms.Bright_Blue,
                       bootstrap=False,
                       nsamp=10000):
    c = Ftable(cat)
    mcy = bl.mconvert('kcorr/M1500_to_f105w_omega_m_0.3.txt')
    mean_kcorr = mcy(6.0)
    mag_array1 = c.f105w_magout_gf[(c.udf_fit == True)
                                   & (c.f105w_magout_gf <= 27.5)]
    re_array1 = c.f105w_reout_gf[(c.udf_fit == True)
                                 & (c.f105w_magout_gf <= 27.5)]
    n_array1 = c.f105w_nout_gf[(c.udf_fit == True)
                               & (c.f105w_magout_gf <= 27.5)]

    mag_array2 = c.f105w_magout_gf[(c.deep_fit == True)
                                   & (c.f105w_magout_gf <= 26.5)]
    re_array2 = c.f105w_reout_gf[(c.deep_fit == True)
                                 & (c.f105w_magout_gf <= 26.5)]
    n_array2 = c.f105w_nout_gf[(c.deep_fit == True)
                               & (c.f105w_magout_gf <= 26.5)]

    mag_array3 = c.f098m_magout_gf[(c.ers_fit == True)
                                   & (c.f098m_magout_gf <= 26.5)]
    re_array3 = c.f098m_reout_gf[(c.ers_fit == True)
                                 & (c.f098m_magout_gf <= 26.5)]
    n_array3 = c.f098m_nout_gf[(c.ers_fit == True)
                               & (c.f098m_magout_gf <= 26.5)]

    mag_array4 = c.f105w_magout_gf[(c.wide_fit == True)
                                   & (c.f105w_magout_gf <= 26.5)]
    re_array4 = c.f105w_reout_gf[(c.wide_fit == True)
                                 & (c.f105w_magout_gf <= 26.5)]
    n_array4 = c.f105w_nout_gf[(c.wide_fit == True)
                               & (c.f105w_magout_gf <= 26.5)]

    mag_array = np.concatenate(
        [mag_array1, mag_array2, mag_array3, mag_array4])
    re_array = np.concatenate([re_array1, re_array2, re_array3, re_array4])
    n_array = np.concatenate([n_array1, n_array2, n_array3, n_array4])

    plot_RLdist(mag_array,
                re_array,
                'i-dropouts',
                logR0,
                beta,
                mean_kcorr,
                color=color,
                marker=marker,
                ax=ax,
                ms=ms,
                gfband='F105W/F098M')
    if bootstrap == True:
        p_array = bootstrap_error(mag_array, np.log10(re_array), nsamp=nsamp)
        return p_array
    else:
        return 0
Code Example #26
    def __init__(self,
                 id2fit,
                 sexcat,
                 sci_image,
                 rms_image,
                 seg_image,
                 sexcatfmt='fits',
                 idcol='id_1',
                 racol='ra_1',
                 deccol='dec_1',
                 magcol='mag_auto',
                 recol='flux_radius_2',
                 axratiocol='axratio',
                 thetacol='theta_image',
                 isoareacol='isoarea_image',
                 root='images',
                 pixscale=0.06,
                 psffile="",
                 constraint_file="constraints3",
                 magzpt=25.96,
                 cbox=160):
        """
        id2fit: array of ID numbers to run GALFIT on.
        sexcat: the SExtractor catalog where the object IDs are specified.
        NOTE: sexcat needs to contain ALL objects in the field, not just the objects to 
        be fit by GALFIT!!
        sci_image: the science (drizzled) mosaic
        rms_image: the RMS image
        seg_image: the segmentation map
        racol: column name that contains RA
        deccol: column name that contains DEC
        magcol: column name that contains magnitude (default to MAG_AUTO)
        recol: column name for Re (half-light radius)
        axratiocol: column name for axis ratio
        thetacol: column name for position angle (theta)
        root: the directory name for all the GALFIT cutouts and output images
        pixscale: the pixel scale in arcsec
        """
        self.id2fit = id2fit
        if sexcatfmt == 'fits':
            self.sexcat = Ftable(sexcat)
            self.nobj_all = len(self.sexcat.d)
        else:
            self.sexcat = sextractor(sexcat)
            self.nobj_all = len(self.sexcat)
        if not os.path.exists(root):
            os.mkdir(root)
        elif not os.path.isdir(root):
            raise OSError, "%s already exists and is not a directory." % root
        self.ra_array = getattr(self.sexcat, racol)
        self.dec_array = getattr(self.sexcat, deccol)
        self.id_array = getattr(self.sexcat, idcol)
        self.mag_array = getattr(self.sexcat, magcol)
        self.Re_array = getattr(self.sexcat, recol)
        self.axratio_array = getattr(self.sexcat, axratiocol)
        self.theta_array = getattr(self.sexcat, thetacol)
        self.isoarea_array = getattr(self.sexcat, isoareacol)

        self.sci_image = sci_image
        self.rms_image = rms_image
        self.seg_image = seg_image
        # figure out the WCS
        hdr = pyfits.getheader(self.sci_image)
        self.wcs = pywcs.WCS(hdr)
        # figure out the X, Y positions of each source... will need later
        skycrd = N.array([self.ra_array, self.dec_array])
        skycrd = skycrd.swapaxes(0, 1)
        pixcrd = self.wcs.wcs_sky2pix(skycrd,
                                      1)  # pixel coordinate starts at 1
        pixcrd = pixcrd.swapaxes(0, 1)
        self.x_array = pixcrd[0]
        self.y_array = pixcrd[1]
        self.crashes = "galfit.crashes"
        self.fitted = "galfit.fitted"
        self.root = root  # a directory containing all the cutouts
        self.xscale = pixscale
        self.yscale = pixscale
        self.psffile = psffile
        self.constraint_file = constraint_file
        self.magzpt = magzpt
        self.cbox = cbox  # convolution box size --- should be comparable to the PSF image size?
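The RA/Dec-to-pixel step in the constructor above, sketched with astropy.wcs instead of the older pywcs module; the mosaic file name and the coordinates are hypothetical:

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

hdr = fits.getheader('sci_mosaic.fits')
wcs = WCS(hdr)
ra = np.array([53.122, 53.130])
dec = np.array([-27.805, -27.799])
x, y = wcs.wcs_world2pix(ra, dec, 1)    # origin=1, matching the convention above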
Code Example #27
 def merge_sexcats(self):
     """
 Assume that files self.sex_catalog exist.
 """
     # Match & merge photometry into one master catalog
     # Note: update all magnitude errors with the scaled flux errors
     merged_columns = []
     for f in self.filters:
         c = Ftable(self.sex_fitstable[f])
         skyrms_factor = self.c['skyrms_factor'][f]
         if f == self.detectband:
             merged_columns += [
                 pyfits.Column(name='number', array=c.number, format='I')
             ]
             merged_columns += [
                 pyfits.Column(name='x_image', array=c.x_image, format='D')
             ]
             merged_columns += [
                 pyfits.Column(name='y_image', array=c.y_image, format='D')
             ]
             merged_columns += [
                 pyfits.Column(name='alpha_j2000',
                               array=c.alpha_j2000,
                               format='D')
             ]
             merged_columns += [
                 pyfits.Column(name='delta_j2000',
                               array=c.delta_j2000,
                               format='D')
             ]
         flux_iso = c.__getitem__('%s_flux_iso' % f)
         fluxerr_iso = c.__getitem__('%s_fluxerr_iso' % f)
         print "Scaling fluxerr_iso..."
         fluxerr_iso_scaled = fluxerr_iso * skyrms_factor
         # sn_iso = flux_iso / fluxerr_iso
         sn_iso = flux_iso / fluxerr_iso_scaled
         flux_auto = c.__getitem__('%s_flux_auto' % f)
         fluxerr_auto = c.__getitem__('%s_fluxerr_auto' % f)
         sn_auto = flux_auto / fluxerr_auto
         print "Scaling fluxerr_auto..."
         fluxerr_auto_scaled = fluxerr_auto * skyrms_factor
         for i in range(len(c.Columns)):
             colname = c.Columns[i]
             if colname in self.fixcols:
                 continue
             else:
                 merged_columns += [
                     pyfits.Column(name=colname,
                                   array=c.__getitem__(colname),
                                   format=c.d.formats[i])
                 ]
                 if colname == '%s_fluxerr_iso' % f:
                     # also add the scaled fluxerr
                     merged_columns += [
                         pyfits.Column(name='%s_fluxerr_iso_scaled' % f,
                                       array=fluxerr_iso_scaled,
                                       format='D')
                     ]
                     magerr_iso_scaled = magerr_from_sn(sn_iso)
                 if colname == '%s_fluxerr_auto' % f:
                     merged_columns += [
                         pyfits.Column(name='%s_fluxerr_auto_scaled' % f,
                                       array=fluxerr_auto_scaled,
                                       format='D')
                     ]
                     magerr_auto_scaled = magerr_from_sn(sn_auto)
                 if colname.startswith('%s_fluxerr_aper' % f):
                     fluxerr_aper_n = getattr(c, colname)
                     print "Scaling %s..." % colname
                     fluxerr_aper_n_scaled = fluxerr_aper_n * skyrms_factor
                     flux_aper_n = getattr(c, '%s_flux_aper' % f)
                     sn_aper = flux_aper_n / fluxerr_aper_n_scaled
                     merged_columns += [
                         pyfits.Column(name='%s_scaled' % colname,
                                       array=fluxerr_aper_n_scaled,
                                       format='D')
                     ]
                 # Now update MAG_ISO: MAG_ISO=99.0 if S/N < 1 and MAGERR_ISO becomes
                 # the 1-sigma magnitude limit; MAG_ISO=-99.0 if object is not detected
                 if colname == '%s_mag_iso' % f:
                     mag_iso = np.where(sn_iso >= 1.,
                                        c.__getitem__('%s_mag_iso' % f),
                                        99.0)
                     mag_iso = np.where(fluxerr_iso == 0, -99.0, mag_iso)
                     merged_columns[-1].array = mag_iso  # update the array
                 if colname == '%s_magerr_iso' % f:
                     magerr_iso = np.where(fluxerr_iso_scaled == 0, 0.,
                                           2.5 * np.log10(1. + 1. / sn_iso))
                     # if S/N <= 1., use the 1-sigma magnitude limit as magerr_iso
                     magerr_iso = np.where(
                         sn_iso >= 1., magerr_iso, self.zeropoints[f] -
                         2.5 * np.log10(fluxerr_iso_scaled))
                     merged_columns[-1].array = magerr_iso
                 # Write the 1-sigma MAG_ISO for ease of color selection
                 if colname == '%s_mag_auto' % f:
                     mag_auto = np.where(sn_auto >= 1.,
                                         c.__getitem__('%s_mag_auto' % f),
                                         99.0)
                     mag_auto = np.where(fluxerr_auto == 0., -99.0,
                                         mag_auto)
                     merged_columns[-1].array = mag_auto
                 if colname == '%s_magerr_auto' % f:
                     magerr_auto = np.where(
                         fluxerr_auto_scaled == 0, 0.,
                         2.5 * np.log10(1. + 1. / sn_auto))
                      magerr_auto = np.where(
                          sn_auto >= 1., magerr_auto, self.zeropoints[f] -
                          2.5 * np.log10(fluxerr_auto_scaled))
                      # update the stored column, mirroring the MAGERR_ISO case above
                      merged_columns[-1].array = magerr_auto
         mag_iso_1sig = np.where(sn_iso >= 1.0, mag_iso, magerr_iso)
         merged_columns += [
             pyfits.Column(name='%s_mag_iso_1sig' % f,
                           array=mag_iso_1sig,
                           format='D')
         ]
         mag_auto_1sig = np.where(sn_auto >= 1.0, mag_auto, magerr_auto)
         merged_columns += [
             pyfits.Column(name='%s_mag_auto_1sig' % f,
                           array=mag_auto_1sig,
                           format='D')
         ]
         naper = 0
         flux_aper = c.__getitem__('%s_flux_aper' % f)
         fluxerr_aper = c.__getitem__('%s_fluxerr_aper' % f)
         print "Scaling fluxerr_aper..."
         fluxerr_aper_scaled = fluxerr_aper * skyrms_factor
         # sn_aper = flux_aper / fluxerr_aper
         sn_aper = flux_aper / fluxerr_aper_scaled
         for i in range(len(merged_columns)):
             if merged_columns[i].name == '%s_mag_aper' % f:
                 mag_aper = np.where(sn_aper >= 1.,
                                     c.__getitem__('%s_mag_aper' % f), 99.0)
                 mag_aper = np.where(fluxerr_aper == 0, -99.0, mag_aper)
                 merged_columns[i].array = mag_aper
             if merged_columns[i].name == '%s_magerr_aper' % f:
                 # magerr_aper = np.where(fluxerr_aper==0, 0.,
                 #                        c.__getitem__('%s_magerr_aper'%f))
                 # magerr_aper = np.where(sn_aper>=1., magerr_aper,
                 #                        self.zeropoints[f]-2.5*np.log10(fluxerr_aper))
                 magerr_aper = magerr_from_sn(sn_aper)
                 magerr_aper = np.where(fluxerr_aper_scaled == 0., 0.,
                                        2.5 * np.log10(1. + 1. / sn_aper))
                 # update magerr_aper with 1-sigma magnitude limit
                 magerr_aper = np.where(
                     sn_aper >= 1., magerr_aper, self.zeropoints[f] -
                     2.5 * np.log10(fluxerr_aper_scaled))
                 merged_columns[i].array = magerr_aper
             if merged_columns[i].name.startswith('%s_mag_aper_' % f):
                 naper = merged_columns[i].name.split('_')[-1]
                 naper = int(naper)
                 flux_aper_n = c.__getitem__('%s_flux_aper_%d' % (f, naper))
                 fluxerr_aper_n = c.__getitem__('%s_fluxerr_aper_%d' %
                                                (f, naper))
                 fluxerr_aper_n_scaled = fluxerr_aper_n * skyrms_factor
                 # sn_aper_n = flux_aper_n / fluxerr_aper_n
                 sn_aper_n = flux_aper_n / fluxerr_aper_n_scaled
                 mag_aper_n = np.where(
                     sn_aper_n >= 1.,
                     c.__getitem__('%s_mag_aper_%d' % (f, naper)), 99.0)
                 mag_aper_n = np.where(fluxerr_aper_n == 0, -99.0,
                                       mag_aper_n)
                 merged_columns[i].array = mag_aper_n
             if merged_columns[i].name.startswith('%s_magerr_aper_' % f):
                 naper = merged_columns[i].name.split('_')[-1]
                 naper = int(naper)
                 flux_aper_n = c.__getitem__('%s_flux_aper_%d' % (f, naper))
                 fluxerr_aper_n = c.__getitem__('%s_fluxerr_aper_%d' %
                                                (f, naper))
                 fluxerr_aper_n_scaled = fluxerr_aper_n * skyrms_factor
                 # sn_aper_n = flux_aper_n / fluxerr_aper_n
                 sn_aper_n = flux_aper_n / fluxerr_aper_n_scaled
                 # magerr_aper_n = np.where(fluxerr_aper_n==0, 0.,
                 #                          c.__getitem__('%s_magerr_aper_%d' % (f,naper)))
                 # magerr_aper_n = np.where(sn_aper_n>=1., magerr_aper_n,
                 #                          self.zeropoints[f]-2.5*np.log10(fluxerr_aper_n))
                 magerr_aper_n = np.where(
                     fluxerr_aper_n == 0., 0.,
                     2.5 * np.log10(1. + 1. / sn_aper_n))
                  magerr_aper_n = np.where(
                      sn_aper_n >= 1., magerr_aper_n, self.zeropoints[f] -
                      2.5 * np.log10(fluxerr_aper_n_scaled))
                 merged_columns[i].array = magerr_aper_n
         # OK, special treatment for mag_aper_1, which should be 0.4 arcsec
         # diameter aperture
         for i in range(len(merged_columns)):
             if merged_columns[i].name == '%s_mag_aper_1' % f:
                 j_mag_aper_1 = i
             elif merged_columns[i].name == '%s_magerr_aper_1' % f:
                 j_magerr_aper_1 = i
             elif merged_columns[i].name == '%s_mag_aper_3' % f:
                 j_mag_aper_3 = i
             elif merged_columns[i].name == '%s_magerr_aper_3' % f:
                 j_magerr_aper_3 = i
         flux_aper_1 = c.__getitem__('%s_flux_aper_1' % f)
         fluxerr_aper_1 = c.__getitem__('%s_fluxerr_aper_1' % f)
         fluxerr_aper_1_scaled = fluxerr_aper_1 * skyrms_factor
         # sn_aper_1 = flux_aper_1 / fluxerr_aper_1
         sn_aper_1 = flux_aper_1 / fluxerr_aper_1_scaled
         mag_aper_1_1sig = np.where(sn_aper_1 >= 1.,
                                    merged_columns[j_mag_aper_1].array,
                                    merged_columns[j_magerr_aper_1].array)
         merged_columns += [
             pyfits.Column(name='%s_mag_aper_1_1sig' % f,
                           array=mag_aper_1_1sig,
                           format='D')
         ]
         flux_aper_3 = c.__getitem__('%s_flux_aper_3' % f)
         fluxerr_aper_3 = c.__getitem__('%s_fluxerr_aper_3' % f)
         fluxerr_aper_3_scaled = fluxerr_aper_3 * skyrms_factor
         # sn_aper_3 = flux_aper_3 / fluxerr_aper_3
         sn_aper_3 = flux_aper_3 / fluxerr_aper_3_scaled
         mag_aper_3_1sig = np.where(sn_aper_3 >= 1.,
                                    merged_columns[j_mag_aper_3].array,
                                    merged_columns[j_magerr_aper_3].array)
         merged_columns += [
             pyfits.Column(name='%s_mag_aper_3_1sig' % f,
                           array=mag_aper_3_1sig,
                           format='D')
         ]
     print "len(merged_columns)", len(merged_columns)
     assert len(
         merged_columns
     ) <= 999, "Warning: maximum number of columns allowed in a FITS table is 999!"
     coldefs = pyfits.ColDefs(merged_columns)
     tbhdu = pyfits.new_table(coldefs)
     tbhdu.header['PARAMS'] = self.paramfile
     merged_table = os.path.join(self.homedir, self.merged_table)
     print merged_table
     if os.path.exists(merged_table):
         os.remove(merged_table)
     tbhdu.writeto(merged_table)
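
The core of the merge above is how magnitudes and magnitude errors are rewritten once the flux errors are rescaled by skyrms_factor: sources below S/N = 1 get MAG = 99.0 and their error column becomes the 1-sigma magnitude limit, sources with zero flux error get MAG = -99.0, and the *_1sig columns pick whichever of the two values is meaningful for color selection. A minimal standalone sketch of that logic, with made-up zeropoint, scaling, and flux values standing in for self.zeropoints[f], self.c['skyrms_factor'][f], and the SExtractor columns, could look like this:

# Sketch only: the S/N-based magnitude / magnitude-error logic used in merge_sexcats.
import numpy as np

zeropoint = 26.49                        # stand-in for self.zeropoints[f]
skyrms_factor = 1.3                      # stand-in for self.c['skyrms_factor'][f]
flux = np.array([150.0, 4.0])            # one well-detected, one marginal source
fluxerr = np.array([10.0, 8.0])

fluxerr_scaled = fluxerr * skyrms_factor
sn = flux / fluxerr_scaled
mag = np.where(sn >= 1., zeropoint - 2.5 * np.log10(flux), 99.0)
mag = np.where(fluxerr == 0., -99.0, mag)        # undetected / no coverage
magerr = np.where(fluxerr_scaled == 0., 0.,
                  2.5 * np.log10(1. + 1. / sn))
# below S/N = 1, the "error" column carries the 1-sigma magnitude limit instead
magerr = np.where(sn >= 1., magerr,
                  zeropoint - 2.5 * np.log10(fluxerr_scaled))
mag_1sig = np.where(sn >= 1., mag, magerr)       # magnitude used for color selection
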
Code example #28
 def __init__(self, catalog, bands=['H']):
     Ftable.__init__(self, catalog)  # read the FITS table
     self.bands = bands
     self.catalog = catalog