def transform_grid(ACT_fits, optical_fits):
    import os
    import numpy
    import tableio
    from astropy.io.fits import getheader

    header = getheader(ACT_fits)
    nx = header['NAXIS1']
    ny = header['NAXIS2']
    (iy, ix) = numpy.indices((ny, nx))

    # flatten (x,y) indices arrays, add 1.0 (FITS pixels are 1-indexed)
    # and put in a tmp file
    x = ix.ravel() + 1
    y = iy.ravel() + 1
    tableio.put_data('/tmp/xy_file', (x, y), format="%7d %7d")

    # system calls for xy2sky and sky2xy
    cmd1 = "xy2sky -d %s @/tmp/xy_file > /tmp/radec_file" % ACT_fits
    cmd2 = "sky2xy %s @/tmp/radec_file > /tmp/xynew_file" % optical_fits
    os.system(cmd1)
    os.system(cmd2)

    # Read in new grid and re-shape (back to 0-indexed)
    (ixnew, iynew) = tableio.get_data('/tmp/xynew_file', (4, 5))
    ix_new = ixnew.reshape(ix.shape) - 1.
    iy_new = iynew.reshape(iy.shape) - 1.

    # Clean up files
    os.remove('/tmp/xy_file')
    os.remove('/tmp/radec_file')
    os.remove('/tmp/xynew_file')
    return ix_new, iy_new
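
# A minimal usage sketch for transform_grid (illustrative only): the file
# names below are hypothetical placeholders, and the WCSTools binaries
# xy2sky/sky2xy must be on the PATH. The returned arrays have the shape of
# the ACT image and hold 0-indexed pixel coordinates in the optical frame,
# e.g. suitable for a resampler such as scipy.ndimage.map_coordinates.
def _example_transform_grid():
    ix_new, iy_new = transform_grid('act_map.fits', 'optical_mosaic.fits')
    print(ix_new.shape, iy_new.shape)  # both match the ACT (ny, nx) grid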
def sky2xy_list(ra, dec, fitsfile):
    import os
    import tableio

    inlist = "/tmp/%s_list.sky2xy" % os.environ['USER']
    outlist = "/tmp/%s_list.sky2xy.out" % os.environ['USER']
    tableio.put_data(inlist, (ra, dec), header='', format="%s %s J2000",
                     append='no')
    cmd = "sky2xy %s @%s > %s" % (fitsfile, inlist, outlist)
    # Use os.system so the call blocks until sky2xy finishes writing outlist
    # (os.popen returned immediately, racing the get_data read below).
    os.system(cmd)
    (x, y) = tableio.get_data(outlist, cols=(4, 5))
    # print(cmd)
    os.remove(inlist)
    os.remove(outlist)
    return x, y
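
# Hedged usage sketch for sky2xy_list: 'mosaic_i.fits' is a placeholder
# WCS-calibrated image, and sky2xy (WCSTools) must be installed. RA/Dec are
# decimal degrees (J2000), matching the "%s %s J2000" input format above.
def _example_sky2xy_list():
    ra = [150.1192, 150.2458]
    dec = [2.2058, 2.3421]
    x, y = sky2xy_list(ra, dec, 'mosaic_i.fits')
    print(x, y)  # pixel coordinates reported by sky2xy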
        # OK. header is now written
        vars = list(detection_variables)
        for i in range(ncats):
            #vars.append((m[i,:]))
            #vars.append((em[i,:]))
            vars.append((m_corr[i, :]))
            vars.append((em_corr[i, :]))
            vars.append((ap_corr[i, :]))
            vars.append((m_bpz[i, :]))
            vars.append((em_bpz[i, :]))
        variables = tuple(vars)
        format = '%i\t' + '%4f ' * (len(variables) - 1)
        self.logfile.write('Writing data to multicolor catalog...')
        tableio.put_data(self.colorcat, variables, format=format,
                         append='yes')
        self.outputList[os.path.basename(self.colorcat)] = preds
        self.logfile.write('Multicolor catalog complete.')
        return

    def writeXml(self):
        """writeXml marks the multicolor catalog with the pipeline protocol
        markup.

        A new requirement has been placed upon this writeXml method to
        allow certain fields of the catalog file to be excluded from the
        xml markup. This is done to avoid redundant fields for the same
        objects being sent to the SDA. See Bugzilla bug #1436.

        New functionality in this method now has each filter producing its
        own photometry catalog. This functionality is implemented via the
        excludeList, which allows the method to select different fields to
        be excluded from the catalog markup. This method
    def BuildColorCat(self):

        # Change accordingly
        zp_error = 0.05

        # The default output names
        self.colorCat = self.tilename + ".color"
        self.columnsFile = self.tilename + ".columns"

        print('Processing catalogs... for: ', self.tilename, file=sys.stderr)

        flux = {}
        fluxerr = {}
        m = {}
        em = {}

        # Get the detection catalog required columns
        outColumns = ['NUMBER', 'X_IMAGE', 'Y_IMAGE']
        detCatalog = self.combcat['i']
        detcols = SEx_head(detCatalog, verb=None)
        detectionList = []
        for key in outColumns:
            detectionList.append(detcols[key])
        detectionColumns = tuple(
            detectionList)  # the get_data function requires a tuple
        detection_variables = tableio.get_data(detCatalog, detectionColumns)

        # Read in the FLUX_ISO and FLUXERR_ISO from each catalog
        for filter in self.filters:

            # Get the columns
            sexcols = SEx_head(self.combcat[filter], verb=None)

            ## Info for flux columns
            fluxList = []
            fluxList.append(sexcols['FLUX_ISO'])
            fluxList.append(sexcols['FLUXERR_ISO'])
            fluxColumns = tuple(
                fluxList)  # the get_data function interface requires a tuple

            # Get the arrays using tableio
            flux[filter], fluxerr[filter] = tableio.get_data(
                self.combcat[filter], fluxColumns)
            m[filter] = flux[filter] * 0.0
            em[filter] = flux[filter] * 0.0

            # Fix the NaN values
            flux[filter] = deNAN(flux[filter])

            # Objects with flux equal to or less than 0 are assigned a
            # magnitude of 99 and a limiting magnitude equal to their
            # SExtractor photometric error. This is interpreted by BPZ as a
            # nondetection with zero flux and a 1-sigma error equal to the
            # limiting magnitude.
            nondetected = Numeric.less_equal(
                flux[filter], 0.0) * Numeric.greater(fluxerr[filter], 0.0)

            # Objects with flux error and flux equal to 0 are assigned a
            # magnitude of -99 and a flux of 0, which is interpreted by
            # SExtractor as a non-observed object.
            nonobserved = Numeric.less_equal(fluxerr[filter], 0.0)

            # When flux error > 100*flux, mark as nonobserved (Benitez, 24-Oct-03).
            # Fix for fc11 -- y[:] has changed meaning
            #nonobserved = Numeric.where(fluxerr[filter] > 100*(abs(flux[filter])),1.0,nonobserved[:])
            nonobserved = Numeric.where(
                fluxerr[filter] > 100 * (abs(flux[filter])), 1.0, nonobserved)

            detected = Numeric.logical_not(nonobserved + nondetected)

            # Get the zero point for the final magnitudes
            zpoint = self.magbase
            print(filter, zpoint)

            flux[filter] = Numeric.clip(flux[filter], 1e-100, 1e100)
            m[filter] = Numeric.where(
                detected,
                -2.5 * Numeric.log10(abs(flux[filter])) + zpoint -
                self.XCorr[filter], m[filter])
            m[filter] = Numeric.where(nondetected, 99.0, m[filter])
            m[filter] = Numeric.where(nonobserved, -99.0, m[filter])

            # clip values from being too small or large, i.e. 0 or inf.
            fluxerr[filter] = Numeric.clip(fluxerr[filter], 1e-100, 1e100)
            em[filter] = Numeric.where(
                detected,
                2.5 * Numeric.log10(1.0 + abs(fluxerr[filter] / flux[filter]))
                + self.XCorrError[filter], em[filter])
            em[filter] = Numeric.where(
                nondetected,
                2.5 * Numeric.log10(abs(fluxerr[filter])) - zpoint,
                em[filter])
            em[filter] = Numeric.where(nonobserved, 0.0, em[filter])

            #outColumns.append(filter + '_SDSS_MAG_ISO')
            #outColumns.append(filter + '_SDSS_MAGERR_ISO')
            outColumns.append(filter + '_MOSAICII_MAG_ISO')
            outColumns.append(filter + '_MOSAICII_MAGERR_ISO')

        # Prepare the header
        header = \
            '## ' + time.ctime() + '\n' + \
            '## BPZ Catalog file for Observation: ' + self.tilename + '\n' + \
            '## (This file was generated automatically by the BCS Rutgers pipeline)\n##\n'
        for i in range(len(outColumns)):
            header = header + '# ' + str(i + 1) + '\t' + outColumns[i] + '\n'

        # Prepare the data
        vars = list(detection_variables)
        for filter in self.filters:
            vars.append(m[filter])
            vars.append(em[filter])
        variables = tuple(vars)
        format = '%i\t %10.2f %10.2f' + '%10.4f ' * (len(variables) - 3)

        print('Writing data to multicolor catalog...', file=sys.stderr)
        tableio.put_data(self.colorCat, variables, header=header,
                         format=format, append='no')
        print('Multicolor catalog complete.', file=sys.stderr)

        # And now write the .columns file
        cfile = open(self.columnsFile, 'w')
        cfile.write('## ' + time.ctime() + '\n')
        cfile.write('## BPZ .columns file for Observation: ' +
                    self.tilename + '\n')
        cfile.write(
            '## (This file was generated automatically by the BCS Rutgers pipeline)\n##\n'
        )
        i = len(detection_variables)
        for filter in self.filters:
            if filter == 'i':
                n_mo = str(i + 1)
            colmag = i + 1
            colmagerr = i + 2
            cfile.write('%s_MOSAICII\t %s,%s\t AB\t %.2f\t 0.0\n' %
                        (filter, i + 1, i + 2, zp_error))
            i = i + 2
        cfile.write('M_0\t%s\n' % n_mo)
        cfile.close()
        return
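
    # Worked example of the magnitude conversion above (illustrative
    # numbers, ignoring the XCorr terms): a detected object with
    # FLUX_ISO = 1000, FLUXERR_ISO = 10 and zeropoint 25.0 gets
    #     m  = -2.5 * log10(1000) + 25.0  = 17.50
    #     em =  2.5 * log10(1 + 10/1000) ~= 0.0108
    # A nondetection (flux <= 0, fluxerr > 0) instead gets m = 99.0 with em
    # set to the limiting magnitude, and a non-observed object
    # (fluxerr <= 0) gets m = -99.0 with em = 0.0, the flags BPZ expects.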
    def _magFix(self, catalogFile):
        """This private method receives a path to a catalog file and sifts
        through the MAGERR field looking for values > 10. It sets the
        corresponding MAG field = -99 and sets that object's MAGERR field
        to 0.0. catalogFile is a path, not a file object.
        """
        # fillHeader will return a list of tuples which looks like
        #
        # [(1, 'NUMBER'),
        #  (2, 'X_IMAGE'),
        #  (3, 'Y_IMAGE'),
        #  ...
        #  (12, 'MAG_ISOCOR'),
        #  (13, 'MAGERR_ISOCOR'),
        #  (14, 'FLUX_APER', 1),
        #  (15, 'FLUX_APER', 2),
        #  (16, 'FLUX_APER', 3),
        #  ...
        # ]
        #
        # The tuples are either of length 2 or 3. If len is 3, the 3rd item
        # of the tuple is the nth occurrence of that column identifier.
        # This occurs on those columns of MAGs and MAGERRs for a series of
        # increasingly larger apertures.

        # newFieldList will be a list of Numeric arrays containing the
        # columns of the catalog. This list will contain fields which have
        # not been altered, i.e. all fields other than MAG_* and MAGERR_*,
        # and the new MAG and MAGERR fields which have been corrected.
        # Once the list is complete, it is tuple-ized and sent to the
        # tableio put_data function.

        newFieldList = []
        newMagsList = []
        newMagErrsList = []
        newMagHeaders = []
        newMagErrHeaders = []
        newHeaders = []
        magCols = []
        magErrCols = []
        selectSet = fillHeader(catalogFile)

        print("Searching catalog for required columns, MAG, MAGERR")
        for i in range(len(selectSet)):
            if len(selectSet[i]) == 2:
                column, name = selectSet[i]
                paramNames = name.split("_")
                if "MAG" in paramNames:
                    magCols.append((column, name))
                elif "MAGERR" in paramNames:
                    magErrCols.append((column, name))
                else:
                    oldField = tableio.get_data(catalogFile, (column - 1))
                    newFieldList.append(oldField)
                    newHeaders.append(name)
                continue
            else:
                column, name, id = selectSet[i]
                paramNames = name.split("_")
                if "MAG" in paramNames:
                    magCols.append((column, name, id))
                elif "MAGERR" in paramNames:
                    magErrCols.append((column, name, id))
                else:
                    oldField = tableio.get_data(catalogFile, (column - 1))
                    newFieldList.append(oldField)
                    newHeaders.append(name)
                continue

        # We now have
        # catalog field --> list
        # --------------------------------
        # MAG_*         --> magCols
        # MAGERR_*      --> magErrCols
        #
        # The algorithm will be to step through the magErrCols columns,
        # extracting those fields via get_data and getting Numeric arrays.
        # The matching mag columns are slurped as well. We search the
        # magErrCols arrays looking for >= 10 values and then mark those
        # mags as -99.0 and the matching magerrs as 0.0.
        # See Bugzilla bug #2700.

        for item in magErrCols:
            magErrAperId = None
            # item may be of len 2 or 3
            if len(item) == 2:
                magErrColId, magErrColName = item
            else:
                magErrColId, magErrColName, magErrAperId = item
            magErrKind = magErrColName.split("_")[1]  # ISO, ISOCOR, etc.

            print("\n\nMAG type:", magErrKind)
            if magErrAperId:
                print(magErrColName, "Aper id is", magErrAperId)
            print("Getting\t", magErrColName, "\tfield", magErrColId)

            # MAGERR array:
            magErrs = tableio.get_data(catalogFile, magErrColId - 1)

            matchingMagColName = None
            matchingMagColId = None

            #----------------- Search for matching MAG_* field -----------------#
            for magitems in magCols:
                # We know that the magErrColName is MAGERR and if
                # magErrAperId is true then the tuple is of len 3, i.e. a
                # MAGERR_APER field. We look for the matching MAG_APER
                # field id, 1, 2, 3... etc.
                if len(magitems) == 3:
                    magColId, magColName, magAperId = magitems
                    if magColName == "MAG_" + magErrKind:
                        matchingMagColName = magColName
                        #print("Found matching field type:", magColName, "in field", magColId)
                        if magAperId == magErrAperId:
                            print("Found matching aperture id.")
                            print("MAG_APER id: ", magAperId,
                                  "MAGERR_APER id: ", magErrAperId)
                            matchingMagColId = magColId
                            matchingMags = tableio.get_data(
                                catalogFile, magColId - 1)
                            break
                    else:
                        continue
                else:
                    magColId, magColName = magitems
                    if magColName == "MAG_" + magErrKind:
                        print("Found matching field type:", magColName,
                              "in field", magColId)
                        matchingMagColName = magColName
                        matchingMagColId = magColId
                        matchingMags = tableio.get_data(
                            catalogFile, magColId - 1)
                        break
                    else:
                        continue
            #--------------------------------------------------------------------#

            print(" MAG err field:", magErrColName, magErrColId)
            print(" Mag field:", matchingMagColName, matchingMagColId)

            # Now the grunt work on the arrays, magErrs and matchingMags.
            #
            # update: flagging all MAGs as -99 when the corresponding
            # MAGERR > 10 introduced a bug which unintentionally reset the
            # magnitudes SExtractor had flagged with a MAG = 99.0 and a
            # MAGERR = 99.0. This now checks for a MAGERR of 99 and does
            # not reset the MAG value if MAGERR = 99.0, but does for all
            # other MAGERRs > 10.0.
            badMagErrs1 = Numeric.where(magErrs >= 10, 1, 0)
            badMagErrs2 = Numeric.where(magErrs != 99.0, 1, 0)
            badMagErrs = badMagErrs1 * badMagErrs2
            del badMagErrs1, badMagErrs2
            newMags = Numeric.where(badMagErrs, -99.0, matchingMags)
            newMagErrs = Numeric.where(badMagErrs, 0.0, magErrs)

            newMagsList.append(newMags)
            newMagHeaders.append(matchingMagColName)
            newMagErrsList.append(newMagErrs)
            newMagErrHeaders.append(magErrColName)

        # Concatenate the lists. This is done to preserve the MAG_APER and
        # MAGERR_APER grouping of the original SExtractor catalog.
        newFieldList = newFieldList + newMagsList
        newFieldList = newFieldList + newMagErrsList
        newHeaders = newHeaders + newMagHeaders
        newHeaders = newHeaders + newMagErrHeaders

        newVariables = tuple(newFieldList)

        # rename the old catalog file as catalogFile.old
        os.rename(catalogFile, catalogFile + ".old")
        self.outputList[os.path.basename(catalogFile) + ".old"] = \
            [os.path.basename(catalogFile)]

        fob = open(catalogFile, 'w')
        fob.write("## " + ptime() + "\n")
        fob.write("## " + self.modName +
                  " catalog regenerated by _magFix method.\n")
        fob.write(
            '## (This file was generated automatically by the ACS Pipeline.)\n##\n'
        )
        fob.write(
            "## This catalog has been photometrically corrected to remove\n")
        fob.write("## 'bad' magnitude values.\n")
        fob.write("##\n")
        for i in range(len(newHeaders)):
            fob.write("# " + str(i + 1) + "\t" + newHeaders[i] + "\n")
        fob.close()
        tableio.put_data(catalogFile, newVariables, append="yes")
        return
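
# A toy check of the _magFix flagging rule above, sketched with numpy in
# place of the legacy Numeric module (values are made up). A MAGERR of 99.0
# is SExtractor's own non-detection flag and must survive untouched; any
# other MAGERR >= 10 flags the object, setting MAG = -99.0 and MAGERR = 0.0.
def _example_magfix_rule():
    import numpy as np
    mags = np.array([18.3, 99.0, 21.7])
    magerrs = np.array([0.05, 99.0, 12.4])
    bad = np.where(magerrs >= 10, 1, 0) * np.where(magerrs != 99.0, 1, 0)
    print(np.where(bad, -99.0, mags))    # [ 18.3  99.  -99. ]
    print(np.where(bad, 0.0, magerrs))   # [ 0.05 99.    0.  ]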
def BuildColorCat(tilename, combcat, filters=['g', 'r', 'i', 'z', 'K'],
                  newfirm=True):

    # The default output names
    colorCat = tilename + "_complete.catalog"

    print('Processing catalogs... for: ', tilename, file=sys.stderr)

    flux = {}
    fluxerr = {}
    m = {}
    em = {}

    # Get the detection catalog required columns
    outColumns = ['NUMBER', 'X_IMAGE', 'Y_IMAGE']
    detCatalog = combcat['i']
    detcols = SEx_head(detCatalog, verb=None)
    detectionList = []
    for key in outColumns:
        detectionList.append(detcols[key])
    # the get_data function requires a tuple
    detectionColumns = tuple(detectionList)
    detection_variables = tableio.get_data(detCatalog, detectionColumns)

    # Read in the FLUX_ISO and FLUXERR_ISO from each catalog
    for filter in filters:

        if not newfirm and filter == 'K':
            continue

        # Get the zeropoint info
        tmp = np.genfromtxt('photometry_control_star_{}.dat'.format(filter),
                            names=True, dtype=None)
        zpoint = tmp['ZP']

        # Get the columns
        sexcols = SEx_head(combcat[filter], verb=None)

        ## Info for flux columns
        fluxList = []
        fluxList.append(sexcols['FLUX_ISO'])
        fluxList.append(sexcols['FLUXERR_ISO'])
        fluxColumns = tuple(
            fluxList)  # the get_data function interface requires a tuple

        # Get the arrays using tableio
        flux[filter], fluxerr[filter] = tableio.get_data(combcat[filter],
                                                         fluxColumns)
        m[filter] = flux[filter] * 0.0
        em[filter] = flux[filter] * 0.0

        # Fix the NaN values
        flux[filter] = deNAN(flux[filter])

        # Objects with flux equal to or less than 0 are assigned a
        # magnitude of 99 and a limiting magnitude equal to their
        # SExtractor photometric error. This is interpreted by BPZ as a
        # nondetection with zero flux and a 1-sigma error equal to the
        # limiting magnitude.
        #nondetected = np.less_equal(flux[filter], 0.0) * \
        #    np.greater(fluxerr[filter], 0.0)
        # update: There are a lot of really small positive values, so this
        # now looks for values really close to zero instead.
        nondetected = (flux[filter] < 1E-3) & (fluxerr[filter] > 0.0)

        # Objects with flux error and flux equal to 0 are assigned a
        # magnitude of -99 and a flux of 0, which is interpreted by
        # SExtractor as a non-observed object.
        nonobserved = np.less_equal(fluxerr[filter], 0.0)

        # When flux error > 100*flux, mark as nonobserved (Benitez, 24-Oct-03).
        nonobserved = np.where(fluxerr[filter] > 100 * (abs(flux[filter])),
                               True, nonobserved)

        detected = np.logical_not(nonobserved + nondetected)

        print(filter, zpoint)

        flux[filter] = np.clip(flux[filter], 1e-100, 1e100)
        m[filter] = np.where(detected,
                             -2.5 * np.log10(abs(flux[filter])) + zpoint,
                             m[filter])
        m[filter] = np.where(nondetected, 99.0, m[filter])
        m[filter] = np.where(nonobserved, -99.0, m[filter])

        # clip values from being too small or large, i.e. 0 or inf.
        fluxerr[filter] = np.clip(fluxerr[filter], 1e-100, 1e100)
        em[filter] = np.where(
            detected,
            2.5 * np.log10(1.0 + abs(fluxerr[filter] / flux[filter])),
            em[filter])
        em[filter] = np.where(
            nondetected,
            2.5 * np.log10(abs(fluxerr[filter])) - zpoint,
            em[filter])
        em[filter] = np.where(nonobserved, 0.0, em[filter])

        if filter == 'K':
            outColumns.append(filter + '_KittPeak_MAG_ISO')
            outColumns.append(filter + '_KittPeak_MAGERR_ISO')
        else:
            outColumns.append(filter + '_MOSAICII_MAG_ISO')
            outColumns.append(filter + '_MOSAICII_MAGERR_ISO')

    # Prepare the header
    header = \
        '## ' + '\n' + \
        '## BPZ Catalog file for Observation: ' + tilename + '\n' + \
        '## (This file was generated automatically by ' + \
        'the BCS Rutgers pipeline)\n##\n'
    for i in range(len(outColumns)):
        header = header + '# ' + str(i + 1) + '\t' + outColumns[i] + '\n'

    # Prepare the data
    vars = list(detection_variables)
    for filter in filters:
        if not newfirm and filter == 'K':
            continue
        vars.append(m[filter])
        vars.append(em[filter])
    variables = tuple(vars)
    format = '%i\t %10.2f %10.2f' + '%10.4f ' * (len(variables) - 3)

    print('Writing data to multicolor catalog...', file=sys.stderr)
    tableio.put_data(colorCat, variables, header=header, format=format,
                     append='no')
    print('Multicolor catalog complete.', file=sys.stderr)
    return
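
# Minimal usage sketch for the standalone BuildColorCat (names are
# hypothetical): it expects one combined SExtractor catalog per filter and
# a photometry_control_star_<filter>.dat file with a ZP column for each
# filter in the working directory.
def _example_build_color_cat():
    combcat = {f: 'tile01_{}.cat'.format(f)
               for f in ['g', 'r', 'i', 'z', 'K']}  # placeholder paths
    # Writes 'tile01_complete.catalog' with X/Y positions plus one
    # MAG_ISO/MAGERR_ISO pair per filter, in BPZ-ready format.
    BuildColorCat('tile01', combcat, filters=['g', 'r', 'i', 'z', 'K'],
                  newfirm=True)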