def defineSchema(self, refSchema):
    self.mapper = SchemaMapper(refSchema)
    self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
    schema = self.mapper.getOutputSchema()
    self.mKey = schema.addField("m", doc="template m", type="ArrayF",
                                size=self.bfd.BFDConfig.MXYSIZE)
    self.dmKey = schema.addField("dm", doc="template dm", type="ArrayF",
                                 size=self.bfd.BFDConfig.MSIZE * self.bfd.BFDConfig.DSIZE)
    self.dxyKey = schema.addField("dxy", doc="template dxy", type="ArrayF",
                                  size=self.bfd.BFDConfig.XYSIZE * self.bfd.BFDConfig.DSIZE)
    self.ndaKey = schema.addField("nda", doc="nda", type=float)
    self.idKey = schema.addField("bfd_id", doc="id", type=np.int64)
    if self.config.zFile:
        self.zKey = schema.addField("z", doc="redshift", type=float)
        # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)
    return schema
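# A minimal usage sketch of the mapper pattern above (not part of the
# original code): build an output schema that carries only the minimal
# id/coord fields of a reference schema plus a new array field, then fill
# one record. The size 6 is an illustrative stand-in for the
# BFDConfig-derived sizes, which are defined elsewhere.
import numpy as np
from lsst.afw.table import SchemaMapper, SourceCatalog, SourceTable

refSchema = SourceTable.makeMinimalSchema()
mapper = SchemaMapper(refSchema)
mapper.addMinimalSchema(SourceTable.makeMinimalSchema(), True)
schema = mapper.getOutputSchema()
mKey = schema.addField("m", doc="template m", type="ArrayF", size=6)

outCat = SourceCatalog(schema)
rec = outCat.addNew()
rec.set(mKey, np.zeros(6, dtype=np.float32))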
def combineWithForce(meas, force):
    """Combine the meas and forced_src catalogs."""
    if len(meas) != len(force):
        raise Exception("# Meas and Forced_src catalogs should have "
                        "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Add new fields
    newSchema.addField('force.deblend.nchild', type=int)
    newSchema.addField('force.classification.extendedness', type=float)
    newSchema.addField('force.flux.kron', type=float)
    newSchema.addField('force.flux.kron.err', type=float)
    newSchema.addField('force.flux.psf', type=float)
    newSchema.addField('force.flux.psf.err', type=float)
    newSchema.addField('force.flux.kron.apcorr', type=float)
    newSchema.addField('force.flux.kron.apcorr.err', type=float)
    newSchema.addField('force.flux.psf.apcorr', type=float)
    newSchema.addField('force.flux.psf.apcorr.err', type=float)
    newSchema.addField('force.cmodel.flux', type=float)
    newSchema.addField('force.cmodel.flux.err', type=float)
    newSchema.addField('force.cmodel.fracDev', type=float)
    newSchema.addField('force.cmodel.exp.flux', type=float)
    newSchema.addField('force.cmodel.exp.flux.err', type=float)
    newSchema.addField('force.cmodel.dev.flux', type=float)
    newSchema.addField('force.cmodel.dev.flux.err', type=float)
    newSchema.addField('force.cmodel.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.flux.apcorr.err', type=float)
    newSchema.addField('force.cmodel.exp.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.exp.flux.apcorr.err', type=float)
    newSchema.addField('force.cmodel.dev.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.dev.flux.apcorr.err', type=float)

    # One column name per 'force.*' field added above
    newCols = ['deblend.nchild', 'classification.extendedness',
               'flux.kron', 'flux.kron.err',
               'flux.psf', 'flux.psf.err',
               'flux.kron.apcorr', 'flux.kron.apcorr.err',
               'flux.psf.apcorr', 'flux.psf.apcorr.err',
               'cmodel.flux', 'cmodel.flux.err',
               'cmodel.flux.apcorr', 'cmodel.flux.apcorr.err',
               'cmodel.exp.flux', 'cmodel.exp.flux.err',
               'cmodel.exp.flux.apcorr', 'cmodel.exp.flux.apcorr.err',
               'cmodel.dev.flux', 'cmodel.dev.flux.err',
               'cmodel.dev.flux.apcorr', 'cmodel.dev.flux.apcorr.err',
               'cmodel.fracDev']
    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)
    for key in newCols:
        combSrc['force.' + key][:] = force[key][:]

    # Carry over the centroid and shape slot definitions from the input table
    for name in ("Centroid", "Shape"):
        val = getattr(meas.table, "get" + name + "Key")()
        err = getattr(meas.table, "get" + name + "ErrKey")()
        flag = getattr(meas.table, "get" + name + "FlagKey")()
        getattr(combSrc.table, "define" + name)(val, err, flag)

    return combSrc
def create_source_catalog_from_text_and_butler(repo_dir, info, dataset='src'):
    butler = dafPersistence.Butler(repo_dir)
    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    newSchema = mapper.getOutputSchema()

    src_cat = SourceCatalog(newSchema)
    for row in info:
        record = src_cat.addNew()
        record.set('coord_ra', Angle(row['RA'] * degrees))
        record.set('coord_dec', Angle(row['Dec'] * degrees))

    print(src_cat['coord_ra'], src_cat['coord_dec'])

    return src_cat
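# Hypothetical invocation of the function above, assuming a Gen2 repository
# path and an astropy Table with RA/Dec columns in degrees (the path and
# coordinates are placeholders):
from astropy.table import Table

info = Table(rows=[(150.11632, 2.20585), (150.24614, 2.31572)],
             names=("RA", "Dec"))
src_cat = create_source_catalog_from_text_and_butler("/path/to/repo", info)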
def defineSchema(self, refSchema):
    self.mapper = SchemaMapper(refSchema)
    self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
    schema = self.mapper.getOutputSchema()
    self.even = schema.addField('bfd_even', type="ArrayF", size=self.n_even,
                                doc="Even BFD moments")
    self.odd = schema.addField('bfd_odd', type="ArrayF", size=self.n_odd,
                               doc="Odd BFD moments")
    self.shift = schema.addField('bfd_shift', type="ArrayF", size=2,
                                 doc="amount shifted to null moments")
    self.cov_even = schema.addField('bfd_cov_even', type="ArrayF",
                                    size=self.n_even * (self.n_even + 1) // 2,
                                    doc="even moment covariance matrix")
    self.cov_odd = schema.addField('bfd_cov_odd', type="ArrayF",
                                   size=self.n_odd * (self.n_odd + 1) // 2,
                                   doc="odd moment covariance matrix")
    self.flag = schema.addField('bfd_flag', type="Flag",
                                doc="Set to 1 for any fatal failure")
    self.centroid_flag = schema.addField('bfd_flag_centroid', type="Flag",
                                         doc="Set to 1 for any fatal failure of centroid")
    self.parent_flag = schema.addField('bfd_flag_parent', type="Flag",
                                       doc="Set to 1 for parents")
    if self.config.add_single_bands:
        self.filter_keys = defaultdict(dict)
        self.n_even_single = self.n_even - len(self.config.filters) + 1
        self.n_odd_single = self.n_odd
        for band in self.config.filters:
            self.filter_keys[band]['even'] = schema.addField(
                f'bfd_even_{band}', type="ArrayF", size=self.n_even_single,
                doc=f"Even BFD moments for filter {band}")
            self.filter_keys[band]['odd'] = schema.addField(
                f'bfd_odd_{band}', type="ArrayF", size=self.n_odd_single,
                doc=f"Odd BFD moments for filter {band}")
            self.filter_keys[band]['cov_even'] = schema.addField(
                f'bfd_cov_even_{band}', type="ArrayF",
                size=self.n_even_single * (self.n_even_single + 1) // 2,
                doc=f"Even moment covariance matrix in filter {band}")
            self.filter_keys[band]['cov_odd'] = schema.addField(
                f'bfd_cov_odd_{band}', type="ArrayF",
                size=self.n_odd_single * (self.n_odd_single + 1) // 2,
                doc=f"Odd moment covariance matrix in filter {band}")
    return schema
def defineSchema(self, refSchema):
    self.mapper = SchemaMapper(refSchema)
    self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
    schema = self.mapper.getOutputSchema()
    self.even = schema.addField('bfd_even', type="ArrayF", size=self.n_even,
                                doc="Even BFD moments")
    self.odd = schema.addField('bfd_odd', type="ArrayF", size=self.n_odd,
                               doc="Odd BFD moments")
    self.shift = schema.addField('bfd_shift', type="ArrayF", size=2,
                                 doc="amount shifted to null moments")
    self.cov_even = schema.addField('bfd_cov_even', type="ArrayF",
                                    size=self.n_even * (self.n_even + 1) // 2,
                                    doc="even moment covariance matrix")
    self.cov_odd = schema.addField('bfd_cov_odd', type="ArrayF",
                                   size=self.n_odd * (self.n_odd + 1) // 2,
                                   doc="odd moment covariance matrix")
    self.flag = schema.addField('bfd_flag', type="Flag",
                                doc="Set to 1 for any fatal failure")
    self.centroid_flag = schema.addField('bfd_flag_centroid', type="Flag",
                                         doc="Set to 1 for any fatal failure of centroid")
    self.parent_flag = schema.addField('bfd_flag_parent', type="Flag",
                                       doc="Set to 1 for parents")
    return schema
def defineSchema(self, refSchema):
    self.mapper = SchemaMapper(refSchema)
    self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
    schema = self.mapper.getOutputSchema()
    self.pqrKey = schema.addField("pqr", doc="pqr", type="ArrayF",
                                  size=self.bfd.BFDConfig.DSIZE)
    self.momKey = schema.addField("moment", doc="moment", type="ArrayF",
                                  size=self.n_even)
    self.momCovKey = schema.addField("moment_cov", doc="moment covariance",
                                     type="ArrayF",
                                     size=self.n_even * (self.n_even + 1) // 2)
    self.numKey = schema.addField("n_templates", doc="number of templates",
                                  type=np.int64)
    self.uniqKey = schema.addField("n_unique", doc="number of unique templates",
                                   type=np.int32)
    self.zKey = schema.addField("z", doc="redshift", type=float)
    self.g1Key = schema.addField("g1", doc="g1 shear", type=float)
    self.g2Key = schema.addField("g2", doc="g2 shear", type=float)
    self.kappaKey = schema.addField("kappa", doc="convergence", type=float)
    self.magKey = schema.addField("mag", doc="magnitude", type=float)
    self.labelKey = schema.addField("label", doc="label", type=str, size=10)
    # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)
    return schema
def getFakeSources(butler, dataId, tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False, footprints=False, radecMatch=None,
                   multiband=False, reffMatch=False, pix=0.168, minRad=None,
                   raCol='RA', decCol='Dec'):
    """
    Get list of sources which agree in pixel position with fake ones with tol.

    This returns a SourceCatalog of all the matched fake objects. Note that
    there will be duplicates in this list, since I haven't checked
    deblend.nchild, and I'm only doing a tolerance match, which could
    include extra sources.

    The outputs can include extraCols as long as they are one of:
    zeropoint, visit, ccd, thetaNorth, pixelScale.

    If includeMissing is True, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table. If it is not None (the default), then do
    an RA/Dec match with the input catalog instead of looking in the header
    for where the sources were added.
    """
    coaddData = "deepCoadd_calexp"
    coaddMeta = "deepCoadd_calexp_md"

    availExtras = {'zeropoint': {'type': float, 'doc': 'zeropoint'},
                   'visit': {'type': int, 'doc': 'visit id'},
                   'ccd': {'type': int, 'doc': 'ccd id'},
                   'thetaNorth': {'type': lsst.afw.geom.Angle, 'doc': 'angle to north'},
                   'pixelScale': {'type': float, 'doc': 'pixelscale in arcsec/pixel'}}

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            meas = butler.get('deepCoadd_meas', dataId, flags=NO_FOOTPRINT,
                              immediate=True)
            force = butler.get('deepCoadd_forced_src', dataId,
                               flags=NO_FOOTPRINT, immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except RuntimeError:
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.getPixelScale().asArcseconds()
        # To find the angle to north: take the middle pixel of the calexp,
        # convert it to sky coordinates, linearize the sky-to-pixel matrix
        # at that coordinate, and read the angle off the linear transform.
        xMid = cal.getWidth() // 2
        yMid = cal.getHeight() // 2
        midPoint = lsst.afw.geom.Point2D(xMid, yMid)
        midCoord = wcs.pixelToSky(midPoint)
        northSkyToPixelMatrix = wcs.linearizeSkyToPixel(midCoord,
                                                        lsst.afw.geom.degrees)
        northSkyToPixelMatrix = northSkyToPixelMatrix.getLinear()
        availExtras['thetaNorth']['value'] = (np.arctan2(*tuple(
            northSkyToPixelMatrix(lsst.afw.geom.Point2D(1.0, 0.0))))
        ) * lsst.afw.geom.radians

    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.getScalar('FLUXMAG0'))
        availExtras['zeropoint']['value'] = zeropoint

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        if minRad is not None:
            print("# The min matching radius is %4.1f pixel" % minRad)
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(sources, radecMatch,
                                                         bbox, cal.getWcs(),
                                                         tol=tol,
                                                         reffMatch=reffMatch,
                                                         pix=pix,
                                                         minRad=minRad,
                                                         raCol=raCol,
                                                         decCol=decCol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=np.int32,
                       doc='id of fake source matched to position')
    newSchema.addField('nMatched', type=np.int32,
                       doc='Number of matched objects')
    newSchema.addField('nPrimary', type=np.int32,
                       doc='Number of unique matched objects')
    newSchema.addField('nNoChild', type=np.int32,
                       doc='Number of matched objects with nchild==0')
    newSchema.addField('rMatched', type=float,
                       doc='Radius used for matching objects, in pixels')
    newSchema.addField('fakeOffX', type=float,
                       doc='offset from input fake position in X (pixels)')
    newSchema.addField('fakeOffY', type=float,
                       doc='offset from input fake position in Y (pixels)')
    newSchema.addField('fakeOffR', type=float,
                       doc='offset from input fake position in radius')
    newSchema.addField('fakeClosest', type="Flag",
                       doc='Is this match the closest one?')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]) +
                    (0 if not includeMissing else
                     list(srcIndex.values()).count([])))

    centroidKey = sources.getCentroidKey()
    isPrimary = sources.schema.find('detect_isPrimary').getKey()
    nChild = sources.schema.find('force_deblend_nChild').getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum([sources[int(obj)].get(isPrimary)
                           for obj in sindlist])
        nNoChild = np.sum([(sources[int(obj)].get(nChild) == 0)
                           for obj in sindlist])
        if includeMissing and (nMatched == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
            newRec.set('nMatched', 0)
            newRec.set('rMatched', rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[int(ss)], mapper)
            newRec.set('fakeId', ident)
            newRec.set('nMatched', nMatched)
            newRec.set('nPrimary', nPrimary)
            newRec.set('nNoChild', nNoChild)
            newRec.set('rMatched', rMatched)
            offsetX = (sources[int(ss)].get(centroidKey).getX() -
                       fakeXY[ident][0])
            newRec.set('fakeOffX', offsetX)
            offsetY = (sources[int(ss)].get(centroidKey).getY() -
                       fakeXY[ident][1])
            newRec.set('fakeOffY', offsetY)
            newRec.set('fakeOffR', np.sqrt(offsetX ** 2.0 + offsetY ** 2.0))
            if radecMatch:
                if int(ss) == int(srcClose[ident]):
                    newRec.set('fakeClosest', True)
                else:
                    newRec.set('fakeClosest', False)

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
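# Example call for a coadd patch (repo path and dataId values are
# placeholders); the function returns None when the butler cannot find the
# data, so check before use:
import lsst.daf.persistence as dafPersist

butler = dafPersist.Butler('/path/to/rerun')
dataId = {'tract': 0, 'patch': '1,1', 'filter': 'HSC-I'}
fakes = getFakeSources(butler, dataId, tol=2.0, includeMissing=True)
if fakes is not None:
    print(len(fakes), 'rows;', (fakes['id'] == 0).sum(), 'unrecovered fakes')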
def combineWithForce(meas, force):
    """Combine the meas and forced_src catalogs."""
    if len(meas) != len(force):
        raise Exception("# Meas and Forced_src catalogs should have "
                        "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Add new fields
    newSchema.addField('force_deblend_nChild', type=np.int32)
    newSchema.addField('force_base_ClassificationExtendedness_value', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFlux', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFluxErr', type=float)
    newSchema.addField('force_base_PsfFlux_instFlux', type=float)
    newSchema.addField('force_base_PsfFlux_instFluxErr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorrErr', type=float)
    newSchema.addField('force_base_PsfFlux_apCorr', type=float)
    newSchema.addField('force_base_PsfFlux_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_fracDev', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorrErr', type=float)

    # One column name per 'force_*' field added above
    newCols = ['deblend_nChild',
               'base_ClassificationExtendedness_value',
               'ext_photometryKron_KronFlux_instFlux',
               'ext_photometryKron_KronFlux_instFluxErr',
               'base_PsfFlux_instFlux', 'base_PsfFlux_instFluxErr',
               'ext_photometryKron_KronFlux_apCorr',
               'ext_photometryKron_KronFlux_apCorrErr',
               'base_PsfFlux_apCorr', 'base_PsfFlux_apCorrErr',
               'modelfit_CModel_instFlux', 'modelfit_CModel_instFluxErr',
               'modelfit_CModel_apCorr', 'modelfit_CModel_apCorrErr',
               'modelfit_CModel_exp_instFlux',
               'modelfit_CModel_exp_instFluxErr',
               'modelfit_CModel_exp_apCorr', 'modelfit_CModel_exp_apCorrErr',
               'modelfit_CModel_dev_instFlux',
               'modelfit_CModel_dev_instFluxErr',
               'modelfit_CModel_dev_apCorr', 'modelfit_CModel_dev_apCorrErr',
               'modelfit_CModel_fracDev']

    measAlias = meas.schema.getAliasMap()
    newAlias = newSchema.getAliasMap()
    for aliasKey in measAlias.keys():
        newAlias.set(aliasKey, measAlias[aliasKey])

    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)
    for key in newCols:
        combSrc['force_' + key][:] = force[key][:]

    return combSrc
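# Typical use of combineWithForce (a sketch; the dataId is illustrative and
# `butler` is assumed to be a Gen2 butler constructed as in the other
# functions here). The two catalogs must come from the same patch so that
# they are row-parallel:
dataId = {'tract': 0, 'patch': '1,1', 'filter': 'HSC-I'}
meas = butler.get('deepCoadd_meas', dataId, immediate=True)
force = butler.get('deepCoadd_forced_src', dataId, immediate=True)
combined = combineWithForce(meas, force)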
def getGalaxy(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol."""
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = 2.5 * np.log10(cal_md.getScalar("FLUXMAG0"))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')
    # Get the nChild
    nChild = sources.get('deblend.nchild')

    # For galaxies, get the Kron flux and its error
    fluxKron, ferrKron = sources.get('flux.kron'), sources.get('flux.kron.err')
    magKron = zeropoint - 2.5 * np.log10(fluxKron)
    merrKron = 2.5 / np.log(10) * (ferrKron / fluxKron)

    # X, Y locations of the fake galaxies
    fakeList = collections.defaultdict(tuple)
    # Regular expression to search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # See if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location of the fake object
            x, y = list(map(float, (cal_md.getScalar(card)).split(',')))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake objects to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX - fcoord[1]) ** 2 +
                             np.abs(srcY - fcoord[2]) ** 2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        # Sort the matched ids by increasing separation
        sortId = [matchId for (matchSp, matchId) in
                  sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print(fid, fcoord, matchId)
        # print(sortId, sorted(matchSp), matchId)
        # Record the indices of all matched objects
        srcIndex[fid] = sortId

    # Build the output source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Collect a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Select the one with the smallest separation (the index list
            # was sorted by separation above)
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         magKron[ss], merrKron[ss], diffX, diffY,
                         parentID[ss], nChild[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            fakeObj = fakeList[nFake]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, -1, -1, -1, -1, -1)
            srcParam.append(paramList)
        # Go to the next fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('magKron', float),
                                         ('errKron', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('parentID', int),
                                         ('nChild', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
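# Example call (repo path, visit, and ccd are placeholders). srcParam is a
# NumPy record array, so cuts on the returned columns are straightforward;
# unmatched fakes carry the sentinel parentID of -1 set above:
srcIndex, srcParam, srcList, zeropoint = getGalaxy('/path/to/rerun', 1228, 49, 3.0)
recovered = srcParam[srcParam['parentID'] != -1]
print(len(recovered), 'of', len(srcParam), 'fake galaxies recovered')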
def _loadAndMatchCatalogs(self, repo, dataIds, matchRadius):
    """Load data from a specific visit. Match with reference.

    Parameters
    ----------
    repo : string
        The repository.  This is generally the directory on disk
        that contains the repository and mapper.
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching. Default is 1 arcsecond.

    Returns
    -------
    afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magerr',
                                       'PSF magnitude uncertainty'))
    newSchema = mapper.getOutputSchema()

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32, ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        try:
            calexpMetadata = butler.get("calexp_md", vId, immediate=True)
        except (FitsError, dafPersist.NoResults) as e:
            print(e)
            print("Could not open calibrated image file for ", vId)
            print("Skipping %s " % repr(vId))
            continue
        except TypeError as te:
            # DECam images that haven't been properly reformatted
            # can trigger a TypeError because of a residual FITS header
            # LTV2 which is a float instead of the expected integer.
            # This generates an error of the form:
            #
            #    lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
            #
            # See, e.g., DM-2957 for details.
            print(te)
            print("Calibration image header information malformed.")
            print("Skipping %s " % repr(vId))
            continue

        calib = afwImage.Calib(calexpMetadata)

        oldSrc = butler.get('src', vId, immediate=True)
        print(len(oldSrc), "sources in ccd %s  visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
            / tmpCat['base_PsfFlux_fluxSigma']
        with afwImageUtils.CalibNoThrow():
            _ = calib.getMagnitude(tmpCat['base_PsfFlux_flux'],
                                   tmpCat['base_PsfFlux_fluxSigma'])
            tmpCat['base_PsfFlux_mag'][:] = _[0]
            tmpCat['base_PsfFlux_magerr'][:] = _[1]

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return allMatches
def _loadAndMatchCatalogs(repo, dataIds, matchRadius,
                          useJointCal=False, skipTEx=False):
    """Load data from a specific visit. Match with reference.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching. Default is 1 arcsecond.
    useJointCal : `bool`, optional
        Use jointcal/meas_mosaic outputs to calibrate positions and fluxes.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).

    Returns
    -------
    catalog_list : afw.table.SourceCatalog
        List of all of the catalogs
    matched_catalog : afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32, ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if useJointCal:
            try:
                photoCalib = butler.get("jointcal_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open photometric calibration for ", vId)
                print("Skipping this dataId.")
                continue
            try:
                wcs = butler.get("jointcal_wcs", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open updated WCS for ", vId)
                print("Skipping this dataId.")
                continue
        else:
            try:
                photoCalib = butler.get("calexp_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping this dataId.")
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                #    lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping this dataId.")
                continue

        # We don't want to put this above the first "if useJointCal" block
        # because we need to use the first `butler.get` above to quickly
        # catch data IDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc), "sources in ccd %s  visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if useJointCal:
            for record in tmpCat:
                record.updateCoord(wcs)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")

        if not skipTEx:
            _, psf_e1, psf_e2 = ellipticity_from_cat(oldSrc,
                                                     slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                       slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches
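# Downstream sketch for the (srcVis, allMatches) pair returned above
# (`repo`, `dataIds`, and `matchRadius` are assumed to be defined as in the
# docstring): keep objects seen at least twice and compute the per-object
# scatter of the PSF magnitudes across visits via GroupView.
import numpy as np

srcVis, allMatches = _loadAndMatchCatalogs(repo, dataIds, matchRadius)
goodMatches = allMatches.where(lambda cat: len(cat) >= 2)
magScatter = goodMatches.aggregate(
    lambda cat: np.nanstd(cat.get('base_PsfFlux_mag')))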
def _loadAndMatchCatalogs(self, repo, dataIds, matchRadius, useJointCal=False):
    """Load data from a specific visit. Match with reference.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching. Default is 1 arcsecond.

    Returns
    -------
    afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32, ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if useJointCal:
            try:
                photoCalib = butler.get("photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open photometric calibration for ", vId)
                print("Skipping %s " % repr(vId))
                continue
            try:
                md = butler.get("wcs_md", vId)
                wcs = afwImage.makeWcs(md)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open updated WCS for ", vId)
                print("Skipping %s " % repr(vId))
                continue
        else:
            try:
                calexpMetadata = butler.get("calexp_md", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping %s " % repr(vId))
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                #    lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping %s " % repr(vId))
                continue

            calib = afwImage.Calib(calexpMetadata)

        # We don't want to put this above the first "if useJointCal" block
        # because we need to use the first `butler.get` above to quickly
        # catch data IDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except Exception:
            oldSrc = butler.get('src', vId)

        print(len(oldSrc), "sources in ccd %s  visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
            / tmpCat['base_PsfFlux_fluxSigma']

        if useJointCal:
            for record in tmpCat:
                record.updateCoord(wcs)
            photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux",
                                           "base_PsfFlux")
        else:
            with afwImageUtils.CalibNoThrow():
                _ = calib.getMagnitude(tmpCat['base_PsfFlux_flux'],
                                       tmpCat['base_PsfFlux_fluxSigma'])
                tmpCat['base_PsfFlux_mag'][:] = _[0]
                tmpCat['base_PsfFlux_magErr'][:] = _[1]

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return allMatches
def _loadAndMatchCatalogs(repo, dataIds, matchRadius,
                          doApplyExternalPhotoCalib=False,
                          externalPhotoCalibName=None,
                          doApplyExternalSkyWcs=False,
                          externalSkyWcsName=None,
                          skipTEx=False, skipNonSrd=False):
    """Load data from specific visits and return a calibrated catalog
    matched with a reference.

    Parameters
    ----------
    repo : `str` or `lsst.daf.persistence.Butler`
        A Butler or a repository URL that can be used to construct one.
    dataIds : list of dict
        List of butler data IDs of Image catalogs to compare to reference.
        The calexp pixel image is needed for the photometric calibration.
    matchRadius : `lsst.geom.Angle`, optional
        Radius for matching. Default is 1 arcsecond.
    doApplyExternalPhotoCalib : bool, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : str, optional
        Type of external `PhotoCalib` to apply.  Currently supported are
        jointcal, fgcm, and fgcm_tract.  Must be set if
        doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : bool, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : str, optional
        Type of external `wcs` to apply.  Currently supported is jointcal.
        Must be set if doApplyExternalSkyWcs is True.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    skipNonSrd : `bool`, optional
        Skip any metrics not defined in the LSST SRD; default False.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new calibrated SourceCatalog.
    matches : `lsst.afw.table.GroupView`
        A GroupView of the matched sources.

    Raises
    ------
    RuntimeError:
        Raised if doApplyExternalPhotoCalib is True and
        externalPhotoCalibName is None, or if doApplyExternalSkyWcs is True
        and externalSkyWcsName is None.
    """
    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError("Must set externalPhotoCalibName if "
                           "doApplyExternalPhotoCalib is True.")
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError("Must set externalSkyWcsName if "
                           "doApplyExternalSkyWcs is True.")

    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    if not skipNonSrd:
        # Needed because addOutputField(... 'slot_ModelFlux_mag') will add
        # a field with that literal name
        aliasMap = schema.getAliasMap()
        # Possibly not needed since base_GaussianFlux is the default, but
        # this ought to be safe
        modelName = aliasMap['slot_ModelFlux'] \
            if 'slot_ModelFlux' in aliasMap.keys() else 'base_GaussianFlux'
        mapper.addOutputField(Field[float](f'{modelName}_mag',
                                           'Model magnitude'))
        mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                           'Model magnitude uncertainty'))
        mapper.addOutputField(Field[float](f'{modelName}_snr',
                                           'Model flux snr'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32, ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if not butler.datasetExists('src', vId):
            print(f'Could not find source catalog for {vId}; skipping.')
            continue

        photoCalib = _loadPhotoCalib(butler, vId,
                                     doApplyExternalPhotoCalib,
                                     externalPhotoCalibName)
        if photoCalib is None:
            continue

        if doApplyExternalSkyWcs:
            wcs = _loadExternalSkyWcs(butler, vId, externalSkyWcsName)
            if wcs is None:
                continue

        # We don't want to put this above the first _loadPhotoCalib call
        # because we need to use the first `butler.get` in there to quickly
        # catch dataIDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc), "sources in ccd %s  visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if doApplyExternalSkyWcs:
            afwTable.updateSourceCoords(wcs, tmpCat)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")

        if not skipNonSrd:
            tmpCat['slot_ModelFlux_snr'][:] = (
                tmpCat['slot_ModelFlux_instFlux']
                / tmpCat['slot_ModelFlux_instFluxErr'])
            photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                           "slot_ModelFlux")

        if not skipTEx:
            _, psf_e1, psf_e2 = ellipticity_from_cat(oldSrc,
                                                     slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                       slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches
def match_catalogs(inputs, photoCalibs, astromCalibs, vIds, matchRadius,
                   apply_external_wcs=False, logger=None):
    schema = inputs[0].schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    # Needed because addOutputField(... 'slot_ModelFlux_mag') will add a
    # field with that literal name
    aliasMap = schema.getAliasMap()
    # Possibly not needed since base_GaussianFlux is the default, but this
    # ought to be safe
    modelName = aliasMap['slot_ModelFlux'] \
        if 'slot_ModelFlux' in aliasMap.keys() else 'base_GaussianFlux'
    mapper.addOutputField(Field[float](f'{modelName}_mag', 'Model magnitude'))
    mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                       'Model magnitude uncertainty'))
    mapper.addOutputField(Field[float](f'{modelName}_snr', 'Model flux snr'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    mapper.addOutputField(Field[np.int32]('filt', 'filter code'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32, 'detector': np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    filter_dict = {'u': 1, 'g': 2, 'r': 3, 'i': 4, 'z': 5, 'y': 6,
                   'HSC-U': 1, 'HSC-G': 2, 'HSC-R': 3, 'HSC-I': 4,
                   'HSC-Z': 5, 'HSC-Y': 6}

    # Sort by visit, detector, then filter
    vislist = [v['visit'] for v in vIds]
    ccdlist = [v['detector'] for v in vIds]
    filtlist = [v['band'] for v in vIds]
    tab_vids = Table([vislist, ccdlist, filtlist],
                     names=['vis', 'ccd', 'filt'])
    sortinds = np.argsort(tab_vids, order=('vis', 'ccd', 'filt'))

    for ind in sortinds:
        oldSrc = inputs[ind]
        photoCalib = photoCalibs[ind]
        wcs = astromCalibs[ind]
        vId = vIds[ind]

        if logger:
            logger.debug(f"{len(oldSrc)} sources in ccd {vId['detector']} "
                         f"visit {vId['visit']}")

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)

        filtnum = filter_dict[vId['band']]
        tmpCat['filt'] = np.repeat(filtnum, len(oldSrc))

        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if apply_external_wcs and wcs is not None:
            updateSourceCoords(wcs, tmpCat)

        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")
        tmpCat['slot_ModelFlux_snr'][:] = (
            tmpCat['slot_ModelFlux_instFlux']
            / tmpCat['slot_ModelFlux_instFluxErr'])
        photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                       "slot_ModelFlux")

        _, psf_e1, psf_e2 = ellipticity_from_cat(oldSrc,
                                                 slot_shape='slot_PsfShape')
        _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                   slot_shape='slot_Shape')
        tmpCat['e1'][:] = star_e1
        tmpCat['e2'][:] = star_e2
        tmpCat['psf_e1'][:] = psf_e1
        tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    # I don't think I can persist a group view, so this may need to be
    # called in a subsequent task:
    #    allMatches = GroupView.build(matchCat)

    return srcVis, matchCat
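# As the closing comment notes, the GroupView is not persisted with the
# match catalog; a follow-up task can rebuild it (the arguments here are the
# ones documented in the signature above):
from lsst.afw.table import GroupView

srcVis, matchCat = match_catalogs(inputs, photoCalibs, astromCalibs, vIds,
                                  matchRadius)
allMatches = GroupView.build(matchCat)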
def getFakeSources(butler, dataId, tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False, footprints=False, radecMatch=None):
    """
    Get list of sources which agree in pixel position with fake ones with tol.

    This returns a SourceCatalog of all the matched fake objects. Note that
    there will be duplicates in this list, since I haven't checked
    deblend.nchild, and I'm only doing a tolerance match, which could
    include extra sources.

    The outputs can include extraCols as long as they are one of:
    zeropoint, visit, ccd, thetaNorth, pixelScale.

    If includeMissing is True, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table. If it is not None (the default), then do
    an RA/Dec match with the input catalog instead of looking in the header
    for where the sources were added.
    """
    availExtras = {'zeropoint': {'type': float, 'doc': 'zeropoint'},
                   'visit': {'type': int, 'doc': 'visit id'},
                   'ccd': {'type': int, 'doc': 'ccd id'},
                   'thetaNorth': {'type': lsst.afw.geom.Angle, 'doc': 'angle to north'},
                   'pixelScale': {'type': float, 'doc': 'pixelscale in arcsec/pixel'}}

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            sources = butler.get('deepCoadd_src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('deepCoadd', dataId, immediate=True)
            cal_md = butler.get('deepCoadd_md', dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.pixelScale().asArcseconds()
        availExtras['thetaNorth']['value'] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()(
                lsst.afw.geom.Point2D(1.0, 0.0)))))
    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        availExtras['zeropoint']['value'] = 2.5 * np.log10(
            cal_md.get('FLUXMAG0'))

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        fakeXY, srcIndex = getFakeMatchesRaDec(
            sources, radecMatch,
            lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT)),
            cal.getWcs(), tol=tol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    newSchema.addField('fakeOffset', type=lsst.afw.geom.Point2D,
                       doc='offset from input fake position (pixels)')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]) +
                    (0 if not includeMissing else
                     list(srcIndex.values()).count([])))

    centroidKey = sources.schema.find('centroid.sdss').getKey()
    for ident, sindlist in srcIndex.items():
        if includeMissing and (len(sindlist) == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set('fakeId', ident)
            newRec.set('fakeOffset',
                       lsst.afw.geom.Point2D(
                           sources[ss].get(centroidKey).getX() - fakeXY[ident][0],
                           sources[ss].get(centroidKey).getY() - fakeXY[ident][1]))

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList