def _makeDummyCatalog(size, skyFlag=False):
    """Create a trivial catalog for testing source counts.

    Parameters
    ----------
    size : `int`
        The number of entries in the catalog.
    skyFlag : `bool`
        If set, the schema is guaranteed to have the ``sky_source`` flag,
        and one row has it set to `True`. If not set, the ``sky_source``
        flag is not present.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new catalog with ``size`` rows.
    """
    schema = SourceCatalog.Table.makeMinimalSchema()
    if skyFlag:
        schema.addField("sky_source", type="Flag", doc="Sky objects.")
    catalog = SourceCatalog(schema)
    record = None
    for _ in range(size):
        record = catalog.addNew()
    # The documented contract is that exactly ONE row carries the flag;
    # setting it inside the loop would flag every row, so flag only the
    # last record (and only when the catalog is non-empty).
    if skyFlag and record is not None:
        record["sky_source"] = True
    return catalog
def combineWithForce(meas, force):
    """Combine the ``meas`` and ``forced_src`` catalogs into one catalog.

    Parameters
    ----------
    meas : `lsst.afw.table.SourceCatalog`
        Unforced measurement catalog.
    force : `lsst.afw.table.SourceCatalog`
        Forced measurement catalog; must be row-aligned with ``meas``.

    Returns
    -------
    combSrc : `lsst.afw.table.SourceCatalog`
        Catalog containing every column of ``meas`` plus ``force.``-prefixed
        copies of the forced columns, with the Centroid/Shape slots
        redefined to the measurement keys.

    Raises
    ------
    ValueError
        If the two catalogs differ in length.
    """
    if len(meas) != len(force):
        # ValueError (not bare Exception); existing callers catching
        # Exception still work since ValueError subclasses it.
        raise ValueError("# Meas and Forced_src catalogs should have "
                         "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Columns copied from the forced catalog, in schema-declaration order.
    # The original list duplicated 'cmodel.flux'/'cmodel.flux.err', which
    # caused a redundant second copy; deduplicated here.
    forcedColumns = [
        'deblend.nchild', 'classification.extendedness',
        'flux.kron', 'flux.kron.err',
        'flux.psf', 'flux.psf.err',
        'flux.kron.apcorr', 'flux.kron.apcorr.err',
        'flux.psf.apcorr', 'flux.psf.apcorr.err',
        'cmodel.flux', 'cmodel.flux.err', 'cmodel.fracDev',
        'cmodel.exp.flux', 'cmodel.exp.flux.err',
        'cmodel.dev.flux', 'cmodel.dev.flux.err',
        'cmodel.flux.apcorr', 'cmodel.flux.apcorr.err',
        'cmodel.exp.flux.apcorr', 'cmodel.exp.flux.apcorr.err',
        'cmodel.dev.flux.apcorr', 'cmodel.dev.flux.apcorr.err',
    ]
    # Add the new fields; only the child count is integral.
    for col in forcedColumns:
        newSchema.addField('force.' + col,
                           type=(int if col == 'deblend.nchild' else float))
    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)
    # Bulk-copy each forced column into its 'force.'-prefixed counterpart.
    for key in forcedColumns:
        combSrc['force.' + key][:] = force[key][:]
    # Re-point the Centroid/Shape slot definitions at the copied keys.
    for name in ("Centroid", "Shape"):
        val = getattr(meas.table, "get" + name + "Key")()
        err = getattr(meas.table, "get" + name + "ErrKey")()
        flag = getattr(meas.table, "get" + name + "FlagKey")()
        getattr(combSrc.table, "define" + name)(val, err, flag)
    return combSrc
def makePsfCandidates(self, starCat, exposure):
    """Make a list of PSF candidates from a star catalog.

    Parameters
    ----------
    starCat : `lsst.afw.table.SourceCatalog`
        Catalog of stars, as returned by
        ``lsst.meas.algorithms.starSelector.run()``.
    exposure : `lsst.afw.image.Exposure`
        The exposure containing the sources.

    Returns
    -------
    struct : `lsst.pipe.base.Struct`
        Results struct containing:

        - ``psfCandidates`` : List of PSF candidates
          (`list` of `lsst.meas.algorithms.PsfCandidate`).
        - ``goodStarCat`` : Subset of ``starCat`` that was successfully made
          into PSF candidates (`lsst.afw.table.SourceCatalog`).
    """
    goodStarCat = SourceCatalog(starCat.schema)
    candidates = []
    sizeConfigured = False
    for star in starCat:
        try:
            candidate = makePsfCandidate(star, exposure)
            # The setXXX methods are class static, but calling them on an
            # instance is convenient: the exposure's pixel type (and hence
            # the candidate's exact type) is unknown here. Configure once.
            if not sizeConfigured:
                side = self.config.kernelSize + 2 * self.config.borderWidth
                candidate.setBorderWidth(self.config.borderWidth)
                candidate.setWidth(side)
                candidate.setHeight(side)
                sizeConfigured = True
            image = candidate.getMaskedImage().getImage()
        except lsst.pex.exceptions.Exception as err:
            self.log.warning(
                "Failed to make a psfCandidate from star %d: %s",
                star.getId(), err)
            continue
        # Reject candidates whose peak pixel is NaN/inf.
        peak = afwMath.makeStatistics(image, afwMath.MAX).getValue()
        if np.isfinite(peak):
            candidates.append(candidate)
            goodStarCat.append(star)
    return pipeBase.Struct(
        psfCandidates=candidates,
        goodStarCat=goodStarCat,
    )
def _makeDummyCatalog(nParents, *, skyFlags=False, deblendFlags=False, nChildren=0, nGrandchildren=0):
    """Create a trivial catalog for testing deblending counts.

    Parameters
    ----------
    nParents : `int`
        The number of entries in the catalog prior to deblending.
    skyFlags : `bool`
        If set, the schema includes flags associated with sky sources, and
        one top-level source (the deblended one, if it exists) and any
        descendents are sky sources.
    deblendFlags : `bool`
        If set, the schema includes flags associated with the deblender.
    nChildren : `int`
        If positive, one source is deblended into ``nChildren`` children.
        This parameter is ignored if ``deblendFlags`` is `False`.
    nGrandchildren : `int`
        If positive, one source produced by ``nChildren`` is deblended into
        ``nGrandchildren`` children. This parameter is ignored if
        ``nChildren`` is 0 or not applicable.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new catalog with ``nParents + nChildren + nGrandchildren`` rows.
    """
    schema = SourceCatalog.Table.makeMinimalSchema()
    if skyFlags:
        schema.addField("sky_source", type="Flag", doc="Sky source.")
    if deblendFlags:
        # See https://community.lsst.org/t/4957 for flag definitions.
        # detect_ flags are deliberately omitted: they are defined by a
        # postprocessing task and some post-deblend catalogs lack them.
        for fieldName in ('deblend_nChild', 'deblend_nPeaks',
                          'deblend_parentNPeaks', 'deblend_parentNChild'):
            schema.addField(fieldName, type=np.int32, doc='')
    catalog = SourceCatalog(schema)
    if nParents > 0:  # normally anti-pattern, but simplifies nested ifs
        for _ in range(nParents):
            record = catalog.addNew()
            if deblendFlags:
                record["deblend_nPeaks"] = 1
        # Only the LAST top-level source is flagged/deblended.
        if skyFlags:
            record["sky_source"] = True
        if deblendFlags and nChildren > 0:
            children = _addChildren(catalog, record, nChildren)
            if nGrandchildren > 0:
                _addChildren(catalog, children[0], nGrandchildren)
    return catalog
def makeRerunCatalog(schema, oldCatalog, idList, fields=None):
    """Create a catalog prepopulated with ids.

    This function is used to generate a SourceCatalog containing blank
    records with Ids specified in the idList parameter.

    This function is primarily used when rerunning measurements on a
    footprint. Specifying ids in a new measurement catalog which correspond
    to ids in an old catalog makes comparing results much easier.

    Parameters
    ----------
    schema : lsst.afw.table.Schema
        Schema used to describe the fields in the resulting SourceCatalog
    oldCatalog : lsst.afw.table.SourceCatalog
        Catalog containing previous measurements.
    idList : iterable
        Python iterable whose values should be numbers corresponding to
        measurement ids; ids must exist in the oldCatalog
    fields : iterable
        Python iterable whose entries should be strings corresponding to
        schema keys that exist in both the old catalog and input schema.
        Fields listed will be copied from the old catalog into the new
        catalog.

    Returns
    -------
    measCat : lsst.afw.table.SourceCatalog
        SourceCatalog prepopulated with entries corresponding to the ids
        specified

    Raises
    ------
    RuntimeError
        If ``fields`` is not an iterable.
    """
    if fields is None:
        fields = []
    if not isinstance(fields, Iterable):
        # Bug fix: the original message lacked a space between the two
        # string fragments ("stringelements").
        raise RuntimeError("fields list must be an iterable with string "
                           "elements")
    # Ensure every requested field exists in the output schema, copying the
    # field definition from the old catalog where missing.
    for entry in fields:
        if entry not in schema:
            schema.addField(oldCatalog.schema.find(entry).field)
    measCat = SourceCatalog(schema)
    for srcId in idList:
        oldSrc = oldCatalog.find(srcId)
        src = measCat.addNew()
        src.setId(srcId)
        src.setFootprint(oldSrc.getFootprint())
        src.setParent(oldSrc.getParent())
        src.setCoord(oldSrc.getCoord())
        # Carry over any requested measurement values.
        for entry in fields:
            src[entry] = oldSrc[entry]
    return measCat
def run(self, source_catalogs, vIds):
    """Concatenate the input catalogs and measure the configured metric.

    All input catalogs must share the schema of the first one; the combined
    catalog is passed to ``self.measure.run`` along with the metric name
    from the task configuration.
    """
    schema = source_catalogs[0].schema
    totalRows = sum(len(cat) for cat in source_catalogs)
    combined = SourceCatalog(schema)
    combined.reserve(totalRows)  # pre-size to avoid reallocation
    for cat in source_catalogs:
        combined.extend(cat)
    return self.measure.run(combined, self.config.connections.metric, vIds)
def selectStars(self, exposure, sourceCat, matches=None):
    """!Return a list of PSF candidates that represent likely stars

    A list of PSF candidates may be used by a PSF fitter to construct a PSF.

    @param[in] exposure  the exposure containing the sources
    @param[in] sourceCat  catalog of sources that may be stars (an
        lsst.afw.table.SourceCatalog)
    @param[in] matches  a match vector as produced by meas_astrom; required
        (defaults to None to match the StarSelector API and improve error
        handling)

    @return an lsst.pipe.base.Struct containing:
    - starCat  catalog of selected stars (a subset of sourceCat)
    """
    import lsstDebug
    debugInfo = lsstDebug.Info(__name__)
    showDisplay = debugInfo.display
    pauseWhenDone = debugInfo.pauseAtEnd  # pause when done

    if matches is None:
        raise RuntimeError("CatalogStarSelectorTask requires matches")

    maskedImage = exposure.getMaskedImage()
    frame = 1
    if showDisplay:
        ds9.mtv(maskedImage, frame=frame, title="PSF candidates")

    isGoodSource = CheckSource(sourceCat, self.config.fluxLim,
                               self.config.fluxMax, self.config.badFlags)
    starCat = SourceCatalog(sourceCat.schema)

    with ds9.Buffering():
        for ref, source, _ in matches:
            # Only unresolved reference objects are star candidates.
            if ref.get("resolved"):
                continue
            accepted = isGoodSource(source)
            if accepted:
                starCat.append(source)
            # Green marker = accepted, red = rejected by quality cuts.
            if showDisplay:
                ds9.dot("+",
                        source.getX() - maskedImage.getX0(),
                        source.getY() - maskedImage.getY0(),
                        size=4, frame=frame,
                        ctype=ds9.GREEN if accepted else ds9.RED)

    if showDisplay and pauseWhenDone:
        input("Continue? y[es] p[db] ")

    return Struct(starCat=starCat)
def setUp(self):
    # Build a Coadd whose CoaddInputs tables have blank filter columns;
    # later test code fills them in.
    self.coadd = ExposureF(30, 90)
    # The WCS is arbitrary — every image shares the same one.
    sharedWcs = makeSkyWcs(crpix=Point2D(0, 0),
                           crval=SpherePoint(45.0, 45.0, degrees),
                           cdMatrix=makeCdMatrix(scale=0.17 * degrees))
    self.coadd.setWcs(sharedWcs)
    exposureSchema = ExposureCatalog.Table.makeMinimalSchema()
    self.filterKey = exposureSchema.addField("filter", type=str, doc="", size=16)
    wKey = exposureSchema.addField("weight", type=float, doc="")
    # Input 1 covers the first 2/3 of the coadd and input 2 the last 2/3,
    # so they overlap in the middle 1/3.
    inputs = ExposureCatalog(exposureSchema)
    madeRecords = []
    for recId, (yMin, yMax), weight in ((1, (0, 59), 2.0),
                                        (2, (30, 89), 3.0)):
        rec = inputs.addNew()
        rec.setId(recId)
        rec.setBBox(Box2I(Point2I(0, yMin), Point2I(29, yMax)))
        rec.setWcs(sharedWcs)
        rec.set(wKey, weight)
        madeRecords.append(rec)
    self.input1, self.input2 = madeRecords
    # The algorithm under test only cares about CCDs, so reuse the same
    # catalog for both visits and CCDs.
    self.coadd.getInfo().setCoaddInputs(CoaddInputs(inputs, inputs))
    # Source catalog with centroids plus a FilterFraction plugin; one
    # record per region (input-1 only, overlap, input-2 only).
    sourceSchema = SourceCatalog.Table.makeMinimalSchema()
    centroidKey = Point2DKey.addFields(sourceSchema, "centroid",
                                      doc="position", unit="pixel")
    sourceSchema.getAliasMap().set("slot_Centroid", "centroid")
    self.plugin = FilterFractionPlugin(
        config=FilterFractionPlugin.ConfigClass(),
        schema=sourceSchema, name="subaru_FilterFraction",
        metadata=PropertyList())
    catalog = SourceCatalog(sourceSchema)
    for attrName, yPos in (("record1", 14.0),
                           ("record12", 44.0),
                           ("record2", 74.0)):
        rec = catalog.addNew()
        rec.set(centroidKey, Point2D(14.0, yPos))
        setattr(self, attrName, rec)
def testPut(self):
    # Round-trip a minimal catalog through the Gen3 shim butler and check
    # the ids survive.
    with lsst.utils.tests.temporaryDirectory() as root:
        Butler3.makeRepo(root)
        butler3 = Butler3(root, run="three")
        datasetType = DatasetType("cat", ["htm7"], "SourceCatalog",
                                  universe=butler3.registry.dimensions)
        butler3.registry.registerDatasetType(datasetType)
        shim = ShimButler(butler3)
        catIn = SourceCatalog(SourceCatalog.Table.makeMinimalSchema())
        catIn.addNew().set("id", 42)
        shim.put(catIn, "cat", htm7=131072)
        catOut = shim.get("cat", htm7=131072)
        self.assertEqual(list(catIn["id"]), list(catOut["id"]))
def create_source_catalog_from_text_and_butler(repo_dir, info, dataset='src'):
    """Build a SourceCatalog of sky coordinates using a dataset's schema.

    Parameters
    ----------
    repo_dir : `str`
        Path to the butler repository to read the schema from.
    info : iterable of mapping
        Rows with ``'RA'`` and ``'Dec'`` entries in degrees.
    dataset : `str`, optional
        Dataset whose ``<dataset>_schema`` defines the output schema.

    Returns
    -------
    src_cat : `lsst.afw.table.SourceCatalog`
        Catalog with one record per input row, with ``coord_ra`` and
        ``coord_dec`` populated.
    """
    butler = dafPersistence.Butler(repo_dir)
    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    newSchema = mapper.getOutputSchema()
    src_cat = SourceCatalog(newSchema)
    for row in info:
        record = src_cat.addNew()
        record.set('coord_ra', Angle(row['RA']*degrees))
        record.set('coord_dec', Angle(row['Dec']*degrees))
    # Removed a leftover debug print that dumped both coordinate columns
    # to stdout on every call.
    return src_cat
def makePsfCandidates(self, exposure, starCat):
    """!Make a list of PSF candidates from a star catalog

    @param[in] exposure  the exposure containing the sources
    @param[in] starCat  catalog of stars (an lsst.afw.table.SourceCatalog),
        e.g. as returned by the run or selectStars method

    @return an lsst.pipe.base.Struct with fields:
    - psfCandidates  list of PSF candidates
        (lsst.meas.algorithms.PsfCandidate)
    - goodStarCat  catalog of stars that were successfully made into PSF
        candidates (a subset of starCat)
    """
    goodStarCat = SourceCatalog(starCat.schema)
    candidates = []
    dimensionsSet = False
    for star in starCat:
        try:
            cand = algorithmsLib.makePsfCandidate(star, exposure)
            # The setXXX methods are class static, but calling them on an
            # instance is convenient: Exposure's pixel type (and hence the
            # candidate's exact type) is unknown here. Configure once.
            if not dimensionsSet:
                side = self.config.kernelSize + 2 * self.config.borderWidth
                cand.setBorderWidth(self.config.borderWidth)
                cand.setWidth(side)
                cand.setHeight(side)
                dimensionsSet = True
            img = cand.getMaskedImage().getImage()
        except Exception as err:
            self.log.debug(
                "Failed to make a psfCandidate from star %d: %s",
                star.getId(), err)
            continue
        # Skip candidates whose peak pixel is NaN/inf.
        if not np.isfinite(afwMath.makeStatistics(img, afwMath.MAX).getValue()):
            continue
        candidates.append(cand)
        goodStarCat.append(star)
    return pipeBase.Struct(
        psfCandidates=candidates,
        goodStarCat=goodStarCat,
    )
def testPut(self):
    # Round-trip a minimal catalog through the shim butler and verify the
    # stored id comes back unchanged.
    with TemporaryDirectory(dir=TESTDIR) as root:
        Butler3.makeRepo(root)
        butler3 = Butler3(root, run="three")
        dsType = DatasetType("cat", ["label"], "SourceCatalog",
                             universe=butler3.registry.dimensions)
        butler3.registry.registerDatasetType(dsType)
        shim = ShimButler(butler3)
        catIn = SourceCatalog(SourceCatalog.Table.makeMinimalSchema())
        catIn.addNew().set("id", 42)
        shim.put(catIn, "cat", label="four")
        catOut = shim.get("cat", label="four")
        self.assertEqual(list(catIn["id"]), list(catOut["id"]))
        # Drop the butlers explicitly so their open SQLite registries are
        # closed; otherwise the temporary directory cannot be removed on
        # NFS.
        del butler3
        del shim
def getClumps(self, sigma=1.0, display=False):
    """Find clumps in this PSF-shape histogram image and return the best one.

    Runs detection and single-frame measurement over the histogram image
    and returns a list containing either all clumps (when exactly one is
    found) or the single selected clump.

    Parameters
    ----------
    sigma : `float`
        Controls the detection threshold relative to the image maximum.
    display : `bool`
        If set, show intermediate images and markers in ds9.

    Raises
    ------
    RuntimeError
        If there are no candidate PSF sources, or no clump is found.
    """
    if self._num <= 0:
        raise RuntimeError("No candidate PSF sources")

    psfImage = self.getImage()
    #
    # Embed psfImage into a larger image so we can smooth when measuring it
    #
    width, height = psfImage.getWidth(), psfImage.getHeight()
    largeImg = psfImage.Factory(afwGeom.ExtentI(2 * width, 2 * height))
    largeImg.set(0)

    bbox = afwGeom.BoxI(afwGeom.PointI(width, height),
                        afwGeom.ExtentI(width, height))
    largeImg.assign(psfImage, bbox, afwImage.LOCAL)
    #
    # Now measure that image, looking for the highest peak. Start by
    # building an Exposure
    #
    msk = afwImage.MaskU(largeImg.getDimensions())
    msk.set(0)
    var = afwImage.ImageF(largeImg.getDimensions())
    var.set(1)
    mpsfImage = afwImage.MaskedImageF(largeImg, msk, var)
    # Negative XY0 puts the original histogram's origin at (0, 0).
    mpsfImage.setXY0(afwGeom.PointI(-width, -height))
    del msk
    del var
    exposure = afwImage.makeExposure(mpsfImage)
    #
    # Next run an object detector
    #
    maxVal = afwMath.makeStatistics(psfImage, afwMath.MAX).getValue()
    threshold = maxVal - sigma * math.sqrt(maxVal)
    if threshold <= 0.0:
        threshold = maxVal

    threshold = afwDetection.Threshold(threshold)
    ds = afwDetection.FootprintSet(mpsfImage, threshold, "DETECTED")
    #
    # And measure it.  This policy isn't the one we use to measure
    # Sources, it's only used to characterize this PSF histogram
    #
    schema = SourceTable.makeMinimalSchema()
    psfImageConfig = SingleFrameMeasurementConfig()
    psfImageConfig.slots.centroid = "base_SdssCentroid"
    psfImageConfig.plugins["base_SdssCentroid"].doFootprintCheck = False
    psfImageConfig.slots.psfFlux = None  # "base_PsfFlux"
    psfImageConfig.slots.apFlux = "base_CircularApertureFlux_3_0"
    psfImageConfig.slots.modelFlux = None
    psfImageConfig.slots.instFlux = None
    psfImageConfig.slots.calibFlux = None
    psfImageConfig.slots.shape = "base_SdssShape"
    # Formerly, this code had centroid.sdss, flux.psf, flux.naive,
    # flags.pixel, and shape.sdss
    psfImageConfig.algorithms.names = [
        "base_SdssCentroid", "base_CircularApertureFlux", "base_SdssShape"
    ]
    psfImageConfig.algorithms["base_CircularApertureFlux"].radii = [3.0]
    psfImageConfig.validate()
    task = SingleFrameMeasurementTask(schema, config=psfImageConfig)

    sourceCat = SourceCatalog(schema)

    gaussianWidth = 1.5  # Gaussian sigma for detection convolution
    exposure.setPsf(algorithmsLib.DoubleGaussianPsf(11, 11, gaussianWidth))

    ds.makeSources(sourceCat)
    #
    # Show us the Histogram
    #
    if display:
        frame = 1
        dispImage = mpsfImage.Factory(
            mpsfImage,
            afwGeom.BoxI(afwGeom.PointI(width, height),
                         afwGeom.ExtentI(width, height)),
            afwImage.LOCAL)
        ds9.mtv(dispImage, title="PSF Selection Image", frame=frame)

    clumps = list()  # List of clumps, to return
    e = None  # thrown exception
    IzzMin = 1.0  # Minimum value for second moments
    IzzMax = (self._xSize / 8.0)**2  # Max value ... clump radius should be < clumpImgSize/8
    apFluxes = []
    task.run(
        sourceCat,
        exposure)  # notes that this is backwards for the new framework
    for i, source in enumerate(sourceCat):
        # Skip sources with failed centroids; note apFluxes and clumps stay
        # index-aligned because both are appended in the same iterations.
        if source.getCentroidFlag():
            continue
        x, y = source.getX(), source.getY()

        apFluxes.append(source.getApFlux())

        val = mpsfImage.getImage().get(int(x) + width, int(y) + height)

        psfClumpIxx = source.getIxx()
        psfClumpIxy = source.getIxy()
        psfClumpIyy = source.getIyy()

        if display:
            if i == 0:
                ds9.pan(x, y, frame=frame)

            ds9.dot("+", x, y, ctype=ds9.YELLOW, frame=frame)
            ds9.dot("@:%g,%g,%g" % (psfClumpIxx, psfClumpIxy, psfClumpIyy),
                    x, y, ctype=ds9.YELLOW, frame=frame)

        # Clamp degenerate (too-small) moments to IzzMin.
        if psfClumpIxx < IzzMin or psfClumpIyy < IzzMin:
            psfClumpIxx = max(psfClumpIxx, IzzMin)
            psfClumpIyy = max(psfClumpIyy, IzzMin)
            if display:
                ds9.dot("@:%g,%g,%g" % (psfClumpIxx, psfClumpIxy, psfClumpIyy),
                        x, y, ctype=ds9.RED, frame=frame)

        det = psfClumpIxx * psfClumpIyy - psfClumpIxy * psfClumpIxy
        try:
            # Invert the moment matrix to get the quadratic-form coefficients.
            a, b, c = psfClumpIyy / det, -psfClumpIxy / det, psfClumpIxx / det
        except ZeroDivisionError:
            a, b, c = 1e4, 0, 1e4

        clumps.append(
            Clump(peak=val, x=x, y=y, a=a, b=b, c=c,
                  ixx=psfClumpIxx, ixy=psfClumpIxy, iyy=psfClumpIyy))

    if len(clumps) == 0:
        msg = "Failed to determine center of PSF clump"
        if e:
            msg += ": %s" % e
        raise RuntimeError(msg)

    # if it's all we got return it
    if len(clumps) == 1:
        return clumps

    # which clump is the best?
    # if we've undistorted the moments, stars should only have 1 clump
    # use the apFlux from the clump measurement, and take the highest
    # ... this clump has more psf star candidate neighbours than the others.

    # get rid of any that are huge, and thus poorly defined
    goodClumps = []
    for clump in clumps:
        if clump.ixx < IzzMax and clump.iyy < IzzMax:
            goodClumps.append(clump)

    # if culling > IzzMax cost us all clumps, we'll have to take what we have
    if len(goodClumps) == 0:
        goodClumps = clumps

    # use the 'brightest' clump
    # NOTE(review): argsort(...)[0] selects the index of the SMALLEST
    # apFlux, which appears to contradict the "take the highest" intent
    # above; also goodClumps is computed but the final selection indexes
    # the unfiltered clumps list. Confirm intended behavior before
    # changing either.
    iBestClump = numpy.argsort(apFluxes)[0]
    clumps = [clumps[iBestClump]]
    return clumps
def selectStars(self, exposure, sourceCat, matches=None):
    """!Return a list of PSF candidates that represent likely stars

    A list of PSF candidates may be used by a PSF fitter to construct a PSF.

    @param[in] exposure  the exposure containing the sources
    @param[in] sourceCat  catalog of sources that may be stars (an
        lsst.afw.table.SourceCatalog)
    @param[in] matches  astrometric matches; ignored by this star selector

    @return an lsst.pipe.base.Struct containing:
    - starCat  catalog of selected stars (a subset of sourceCat)
    """
    import lsstDebug
    display = lsstDebug.Info(__name__).display

    isGoodSource = CheckSource(sourceCat.getTable(), self.config.badFlags,
                               self.config.fluxLim, self.config.fluxMax)

    detector = exposure.getDetector()
    mi = exposure.getMaskedImage()
    #
    # Create an Image of Ixx v. Iyy, i.e. a 2-D histogram
    #

    # Use stats on our Ixx/yy values to determine the xMax/yMax range for
    # clump image
    iqqList = []
    for s in sourceCat:
        ixx, iyy = s.getIxx(), s.getIyy()
        # ignore NaN and unrealistically large values
        # (the "x == x" idiom is a NaN test: NaN != NaN)
        if (ixx == ixx and ixx < self.config.histMomentMax
                and iyy == iyy and iyy < self.config.histMomentMax
                and isGoodSource(s)):
            iqqList.append(s.getIxx())
            iqqList.append(s.getIyy())
    stat = afwMath.makeStatistics(
        iqqList, afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.MAX)
    iqqMean = stat.getValue(afwMath.MEANCLIP)
    iqqStd = stat.getValue(afwMath.STDEVCLIP)
    iqqMax = stat.getValue(afwMath.MAX)

    iqqLimit = max(iqqMean + self.config.histMomentClip * iqqStd,
                   self.config.histMomentMaxMultiplier * iqqMean)
    # if the max value is smaller than our range, use max as the limit, but
    # don't go below N*mean
    if iqqLimit > iqqMax:
        iqqLimit = max(self.config.histMomentMinMultiplier * iqqMean, iqqMax)

    psfHist = _PsfShapeHistogram(detector=detector,
                                 xSize=self.config.histSize,
                                 ySize=self.config.histSize,
                                 ixxMax=iqqLimit, iyyMax=iqqLimit)

    if display:
        frame = 0
        ds9.mtv(mi, frame=frame, title="PSF candidates")

    ctypes = []
    for source in sourceCat:
        good = isGoodSource(source)
        if good:
            # notRejected records whether the histogram accepted the source;
            # it is only read below when good is True, so it is never
            # consumed unbound.
            notRejected = psfHist.insert(source)
        if display:
            if good:
                if notRejected:
                    ctypes.append(ds9.GREEN)  # good
                else:
                    ctypes.append(ds9.MAGENTA)  # rejected
            else:
                ctypes.append(ds9.RED)  # bad

    if display:
        with ds9.Buffering():
            for source, ctype in zip(sourceCat, ctypes):
                ds9.dot("o", source.getX() - mi.getX0(),
                        source.getY() - mi.getY0(),
                        frame=frame, ctype=ctype)

    clumps = psfHist.getClumps(display=display)
    #
    # Go through and find all the PSF-like objects
    #
    # We'll split the image into a number of cells, each of which
    # contributes only one PSF candidate star
    #
    starCat = SourceCatalog(sourceCat.table)

    pixToTanXYTransform = None
    if detector is not None:
        tanSys = detector.makeCameraSys(TAN_PIXELS)
        pixToTanXYTransform = detector.getTransformMap().get(tanSys)

    # psf candidate shapes must lie within this many RMS of the average
    # shape
    # N.b. if Ixx == Iyy, Ixy = 0 the criterion is
    # dx^2 + dy^2 < self.config.clumpNSigma*(Ixx + Iyy)
    #             == 2*self.config.clumpNSigma*Ixx
    for source in sourceCat:
        if not isGoodSource(source):
            continue
        Ixx, Ixy, Iyy = source.getIxx(), source.getIxy(), source.getIyy()
        if pixToTanXYTransform:
            p = afwGeom.Point2D(source.getX(), source.getY())
            linTransform = pixToTanXYTransform.linearizeForwardTransform(
                p).getLinear()
            m = Quadrupole(Ixx, Iyy, Ixy)
            # NOTE(review): this relies on transform() mutating m in place;
            # the transformed moments are read back from m below. Confirm
            # against the afw.geom.ellipses API in use.
            m.transform(linTransform)
            Ixx, Iyy, Ixy = m.getIxx(), m.getIyy(), m.getIxy()

        x, y = psfHist.momentsToPixel(Ixx, Iyy)
        for clump in clumps:
            dx, dy = (x - clump.x), (y - clump.y)

            if math.sqrt(clump.a * dx * dx + 2 * clump.b * dx * dy
                         + clump.c * dy * dy) < 2 * self.config.clumpNSigma:
                # A test for > would be confused by NaN
                # NOTE(review): this isGoodSource check is redundant with
                # the one at the top of the outer loop.
                if not isGoodSource(source):
                    continue
                try:
                    psfCandidate = algorithmsLib.makePsfCandidate(
                        source, exposure)

                    # The setXXX methods are class static, but it's
                    # convenient to call them on an instance as we don't
                    # know Exposure's pixel type (and hence psfCandidate's
                    # exact type)
                    if psfCandidate.getWidth() == 0:
                        psfCandidate.setBorderWidth(
                            self.config.borderWidth)
                        psfCandidate.setWidth(self.config.kernelSize + 2 *
                                              self.config.borderWidth)
                        psfCandidate.setHeight(self.config.kernelSize + 2 *
                                               self.config.borderWidth)

                    im = psfCandidate.getMaskedImage().getImage()
                    # Reject candidates whose peak pixel is NaN/inf.
                    if not numpy.isfinite(
                            afwMath.makeStatistics(
                                im, afwMath.MAX).getValue()):
                        continue
                    starCat.append(source)

                    if display:
                        ds9.dot("o", source.getX() - mi.getX0(),
                                source.getY() - mi.getY0(),
                                size=4, frame=frame, ctype=ds9.CYAN)
                except Exception as err:
                    self.log.error("Failed on source %s: %s"
                                   % (source.getId(), err))
                # One candidate per source: stop at the first matching clump.
                break

    return Struct(starCat=starCat, )
def getSchemaCatalogs(self):
    """Return the schema catalog for this CmdLineTask's output dataset."""
    schemaCatalog = SourceCatalog(self.schema)
    return {self.config.output: schemaCatalog}
def getInitOutputDatasets(self):
    """Return the init-output datasets for this PipelineTask."""
    schemaCatalog = SourceCatalog(self.schema)
    return {"outputSchema": schemaCatalog}
def run(self, images, ref, replacers, imageId):
    """Process coadds from all bands for a single patch.

    This method should not add or modify self.

    So far all children are using this exact code so leaving
    it here for now. If we specialize a lot, might make a
    processor its own object

    Parameters
    ----------
    images : `dict` of `lsst.afw.image.ExposureF`
        Coadd images and associated metadata, keyed by filter name.
    ref : `lsst.afw.table.SourceCatalog`
        A catalog with one record for each object, containing "best"
        measurements across all bands.
    replacers : `dict` of `lsst.meas.base.NoiseReplacer`, optional
        A dictionary of `~lsst.meas.base.NoiseReplacer` objects that can
        be used to insert and remove deblended pixels for each object.
        When not `None`, all detected pixels in ``images`` will have
        *already* been replaced with noise, and this *must* be used
        to restore objects one at a time.
    imageId : `int`
        Unique ID for this unit of data. Should be used (possibly
        indirectly) to seed random numbers.

    Returns
    -------
    results : `lsst.pipe.base.Struct`
        Struct with (at least) an `output` attribute that is a catalog
        to be written as ``self.config.output``, or `None` when the
        number of images does not match the configured filters.
    """
    if len(images) != len(self.config.filters):
        self.log.info(
            'Number of filters does not match the list of images given. Skipping'
        )
        return None

    tm0 = time.time()
    nproc = 0

    # Make an empty catalog
    output = SourceCatalog(self.schema)

    # Add mostly-empty rows to it, copying IDs from the ref catalog.
    output.extend(ref, mapper=self.mapper)

    # Only process rows in the configured [start_index, max_index) window.
    min_index = self.config.start_index
    if self.config.num_to_process is None:
        max_index = len(ref)
    else:
        max_index = self.config.start_index + self.config.num_to_process

    for n, (refRecord, outRecord) in enumerate(zip(ref, output)):
        if n < min_index or n >= max_index:
            continue

        # Skip blended parents: only childless (leaf) objects are measured.
        if refRecord.get('deblend_nChild') != 0:
            outRecord.set(self.flag, 1)
            outRecord.set(self.parent_flag, 1)
            continue

        self.log.info('index: %06d/%06d' % (n, max_index))
        nproc += 1

        outRecord.setFootprint(
            None)  # copied from ref; don't need to write these again

        # Insert the deblended pixels for just this object into all images.
        for r in replacers.values():
            r.insertSource(refRecord.getId())

        try:
            kgals = self.buildKGalaxy(refRecord, images)
            kc = KColorGalaxy(self.bfd, kgals)
        except Exception as e:
            # NOTE(review): all exceptions are swallowed here and only
            # surfaced as a flagged record; 'e' itself is never logged —
            # consider at least logging it.
            kc = None

        if kc is None:
            outRecord.set(self.flag, 1)
            continue

        dx, badcentering, msg = kc.recenter(self.config.weight_sigma)

        if badcentering:
            self.log.info('Bad centering %s', msg)
            outRecord.set(self.flag, 1)
            outRecord.set(self.centroid_flag, 1)
            # Fall through and measure at zero offset despite the failure.
            dx = [0, 0]

        mom, cov = kc.get_moment(dx[0], dx[1], True)
        mom_even = mom.m
        mom_odd = mom.xy
        cov_even = cov.m
        cov_odd = cov.xy

        # Store only the upper triangle of each (symmetric) covariance.
        cov_even_save = []
        cov_odd_save = []
        for ii in range(cov_even.shape[0]):
            cov_even_save.extend(cov_even[ii][ii:])
        for ii in range(cov_odd.shape[0]):
            cov_odd_save.extend(cov_odd[ii][ii:])

        outRecord.set(self.even, np.array(mom_even, dtype=np.float32))
        outRecord.set(self.odd, np.array(mom_odd, dtype=np.float32))
        outRecord.set(self.cov_even,
                      np.array(cov_even_save, dtype=np.float32))
        outRecord.set(self.cov_odd, np.array(cov_odd_save, dtype=np.float32))
        outRecord.set(self.shift,
                      np.array([dx[0], dx[1]], dtype=np.float32))

        # Remove the deblended pixels for this object so we can process
        # the next one.
        for r in replacers.values():
            r.removeSource(refRecord.getId())
        # Free the per-object intermediates before the next iteration.
        del kgals
        del kc
        del mom
        del cov

    # Restore all original pixels in the images.
    if replacers is not None:
        for r in replacers.values():
            r.end()

    tm = time.time() - tm0
    self.log.info('time: %g min' % (tm / 60.0))
    self.log.info('time per: %g sec' % (tm / nproc))

    # Only the processed window is returned.
    return Struct(output=output[min_index:max_index])
def getGalaxy(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol

    Parameters
    ----------
    rootdir : `str`
        Butler repository root.
    visit, ccd : `int`
        Data ID of the exposure to inspect.
    tol : `float`
        Maximum pixel separation for a fake/source match.

    Returns
    -------
    srcIndex : `collections.defaultdict`
        Fake-index -> list of matched source indices (closest first).
    srcParam : `numpy.ndarray`
        Record array of per-fake measured parameters.
    srcList : `lsst.afw.table.SourceCatalog`
        NOTE(review): this catalog is created and reserved but never
        populated before being returned — confirm whether rows were meant
        to be appended for each match.
    zeropoint : `float`
        Photometric zeropoint of the exposure.
    """
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = (2.5 * np.log10(cal_md.getScalar("FLUXMAG0")))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')
    # Get the nChild
    nChild = sources.get('deblend.nchild')

    # For Galaxies: Get these parameters
    # 1. Get the Kron flux and its error
    fluxKron, ferrKron = sources.get('flux.kron'), sources.get('flux.kron.err')
    magKron = (zeropoint - 2.5 * np.log10(fluxKron))
    merrKron = (2.5 / np.log(10) * (ferrKron / fluxKron))

    # X, Y locations of the fake galaxies
    fakeList = collections.defaultdict(tuple)
    # Regular Expression
    # Search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # To see if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location for fake object
            x, y = list(map(float, (cal_md.getScalar(card)).split(',')))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake object to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX-fcoord[1])**2 +
                             np.abs(srcY-fcoord[2])**2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        # Sort matched indices by increasing separation.
        # NOTE(review): the comprehension's tuple unpacking shadows
        # matchSp/matchId — it works, but renaming the loop variables
        # would be clearer.
        sortId = [matchId for (matchSp, matchId) in
                  sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print fid, fcoord, matchId
        # print sortId, sorted(matchSp), matchId
        # Select the index of all matched object
        srcIndex[fid] = sortId

    # Return the source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Return a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Only select the one with the smallest separation
            # TODO: actually get the one with minimum separation
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         magKron[ss], merrKron[ss], diffX, diffY,
                         parentID[ss], nChild[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            # No match: emit a sentinel row so every fake is represented.
            fakeObj = fakeList[nFake]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, -1, -1, -1, -1, -1)
            srcParam.append(paramList)
        # Go to another fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('magKron', float),
                                         ('errKron', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('parentID', int),
                                         ('nChild', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
def selectStars(self, exposure, sourceCat, matches=None):
    """Select sources for Kernel candidates.

    Candidates are sources that pass the configured flag/flux cuts and whose
    reference-catalog counterparts satisfy the star/galaxy, variability and
    (optionally) g-r color criteria.

    @param[in] exposure  the exposure containing the sources
    @param[in] sourceCat  catalog of sources that may be stars
                          (an lsst.afw.table.SourceCatalog)
    @param[in] matches  a match vector as produced by meas_astrom; required
                        (defaults to None to match the StarSelector API and
                        improve error handling)

    @return an lsst.pipe.base.Struct containing:
    - starCat  a list of sources to be used as kernel candidates
    """
    import lsstDebug
    display = lsstDebug.Info(__name__).display
    displayExposure = lsstDebug.Info(__name__).displayExposure
    pauseAtEnd = lsstDebug.Info(__name__).pauseAtEnd

    # This selector cannot operate without reference matches.
    if matches is None:
        raise RuntimeError("DiaCatalogSourceSelector requires matches")

    mi = exposure.getMaskedImage()

    if display:
        if displayExposure:
            ds9.mtv(mi, title="Kernel candidates", frame=lsstDebug.frame)
    #
    # Look for flags in each Source: functor applying the configured
    # flux limits and bad-flag rejection.
    #
    isGoodSource = CheckSource(sourceCat, self.config.fluxLim,
                               self.config.fluxMax, self.config.badFlags)
    #
    # Go through and find all the acceptable candidates in the catalogue
    #
    starCat = SourceCatalog(sourceCat.schema)

    if display and displayExposure:
        symbs = []
        ctypes = []

    doColorCut = True

    refSchema = matches[0][0].schema
    rRefFluxField = measAlg.getRefFluxField(refSchema, "r")
    gRefFluxField = measAlg.getRefFluxField(refSchema, "g")
    for ref, source, d in matches:
        if not isGoodSource(source):
            if display and displayExposure:
                symbs.append("+")
                ctypes.append(ds9.RED)
        else:
            # Reference-catalog booleans: unresolved => star,
            # not photometric => variable (per the flags read here).
            isStar = not ref.get("resolved")
            isVar = not ref.get("photometric")
            gMag = None
            rMag = None
            if doColorCut:
                try:
                    gMag = -2.5 * np.log10(ref.get(gRefFluxField))
                    rMag = -2.5 * np.log10(ref.get(rRefFluxField))
                except KeyError:
                    # Reference catalog lacks g/r fluxes: disable the color
                    # cut for the rest of the loop and accept all colors.
                    self.log.warn("Cannot cut on color info; fields 'g' and 'r' do not exist")
                    doColorCut = False
                    isRightColor = True
                else:
                    isRightColor = (gMag-rMag) >= self.config.grMin and (gMag-rMag) <= self.config.grMax

            isRightType = (self.config.selectStar and isStar) or (self.config.selectGalaxy and not isStar)
            # NOTE(review): `includeVariable is isVar` is an identity
            # comparison of booleans — True only when includeVariable and
            # isVar are the same bool object.  Presumably the intent is
            # "include variables, or source is non-variable"; confirm.
            isRightVar = (self.config.includeVariable) or (self.config.includeVariable is isVar)
            if isRightType and isRightVar and isRightColor:
                starCat.append(source)
                if display and displayExposure:
                    symbs.append("+")
                    ctypes.append(ds9.GREEN)
            elif display and displayExposure:
                symbs.append("o")
                ctypes.append(ds9.BLUE)

    if display and displayExposure:
        with ds9.Buffering():
            for (ref, source, d), symb, ctype in zip(matches, symbs, ctypes):
                if display and displayExposure:
                    ds9.dot(symb, source.getX() - mi.getX0(), source.getY() - mi.getY0(),
                            size=4, ctype=ctype, frame=lsstDebug.frame)

    if display:
        lsstDebug.frame += 1
        if pauseAtEnd:
            input("Continue? y[es] p[db] ")

    return Struct(
        starCat=starCat,
    )
def calculateThreshold(self, exposure, seed, sigma=None):
    """Calculate new threshold

    This is the main functional addition to the vanilla
    `SourceDetectionTask`.

    We identify sky objects and perform forced PSF photometry on
    them. Using those PSF flux measurements and estimated errors,
    we set the threshold so that the stdev of the measurements
    matches the median estimated error.

    Parameters
    ----------
    exposure : `lsst.afw.image.Exposure`
        Exposure on which we're detecting sources.
    seed : `int`
        RNG seed to use for finding sky objects.
    sigma : `float`, optional
        Gaussian sigma of smoothing kernel; if not provided,
        will be deduced from the exposure's PSF.

    Returns
    -------
    result : `lsst.pipe.base.Struct`
        Result struct with components:

        - ``multiplicative``: multiplicative factor to be applied to
          the configured detection threshold (`float`).
        - ``additive``: additive factor to be applied to the background
          level (`float`).
    """
    # Make a catalog of sky objects: footprints placed on blank sky.
    fp = self.skyObjects.run(exposure.maskedImage.mask, seed)
    skyFootprints = FootprintSet(exposure.getBBox())
    skyFootprints.setFootprints(fp)
    table = SourceTable.make(self.skyMeasurement.schema)
    catalog = SourceCatalog(table)
    catalog.reserve(len(skyFootprints.getFootprints()))
    skyFootprints.makeSources(catalog)
    key = catalog.getCentroidKey()
    for source in catalog:
        # Each sky footprint carries exactly one synthetic peak; use it
        # as the centroid for forced measurement.
        peaks = source.getFootprint().getPeaks()
        assert len(peaks) == 1
        source.set(key, peaks[0].getF())
        source.updateCoord(exposure.getWcs())

    # Forced photometry on sky objects (catalog is both target and reference).
    self.skyMeasurement.run(catalog, exposure, catalog, exposure.getWcs())

    # Calculate new threshold from the sky-object flux statistics.
    fluxes = catalog["base_PsfFlux_instFlux"]
    area = catalog["base_PsfFlux_area"]
    bg = catalog["base_LocalBackground_instFlux"]

    # Keep only measurements with no flags and all-finite values.
    good = (~catalog["base_PsfFlux_flag"] & ~catalog["base_LocalBackground_flag"] &
            np.isfinite(fluxes) & np.isfinite(area) & np.isfinite(bg))

    if good.sum() < self.config.minNumSources:
        # Too few sky measurements: fall back to no threshold adjustment.
        self.log.warn("Insufficient good flux measurements (%d < %d) for dynamic threshold calculation",
                      good.sum(), self.config.minNumSources)
        return Struct(multiplicative=1.0, additive=0.0)

    # Median background level per unit area (additive correction).
    bgMedian = np.median((fluxes/area)[good])

    lq, uq = np.percentile((fluxes - bg*area)[good], [25.0, 75.0])
    # 0.741 = 1/1.349 converts the interquartile range to a robust
    # estimate of the Gaussian standard deviation.
    stdevMeas = 0.741*(uq - lq)
    medianError = np.median(catalog["base_PsfFlux_instFluxErr"][good])
    return Struct(multiplicative=medianError/stdevMeas, additive=bgMedian)
def selectStars(self, exposure, sourceCat, matches=None):
    """!Return a list of PSF candidates that represent likely stars

    A list of PSF candidates may be used by a PSF fitter to construct a PSF.

    Stars are identified by clustering sources in the width (sqrt of the
    mean second moment) dimension and taking the narrowest cluster.

    \param[in] exposure  the exposure containing the sources
    \param[in] sourceCat  catalog of sources that may be stars
                          (an lsst.afw.table.SourceCatalog)
    \param[in] matches  astrometric matches; ignored by this star selector

    \return an lsst.pipe.base.Struct containing:
    - starCat  catalog of selected stars (a subset of sourceCat)
    """
    import lsstDebug
    display = lsstDebug.Info(__name__).display
    displayExposure = lsstDebug.Info(
        __name__).displayExposure  # display the Exposure + spatialCells
    plotMagSize = lsstDebug.Info(
        __name__).plotMagSize  # display the magnitude-size relation
    dumpData = lsstDebug.Info(
        __name__).dumpData  # dump data to pickle file?

    detector = exposure.getDetector()
    pixToTanXYTransform = None
    if detector is not None:
        # Transform to TAN_PIXELS so moments are measured in a locally
        # distortion-free frame.
        tanSys = detector.makeCameraSys(TAN_PIXELS)
        pixToTanXYTransform = detector.getTransformMap().get(tanSys)
    #
    # Look at the distribution of stars in the magnitude-size plane
    #
    flux = sourceCat.get(self.config.sourceFluxField)

    xx = numpy.empty(len(sourceCat))
    xy = numpy.empty_like(xx)
    yy = numpy.empty_like(xx)
    for i, source in enumerate(sourceCat):
        Ixx, Ixy, Iyy = source.getIxx(), source.getIxy(), source.getIyy()
        if pixToTanXYTransform:
            # Correct the second moments for local optical distortion.
            p = afwGeom.Point2D(source.getX(), source.getY())
            linTransform = pixToTanXYTransform.linearizeForwardTransform(
                p).getLinear()
            m = Quadrupole(Ixx, Iyy, Ixy)
            m.transform(linTransform)
            Ixx, Iyy, Ixy = m.getIxx(), m.getIyy(), m.getIxy()

        xx[i], xy[i], yy[i] = Ixx, Ixy, Iyy

    # "Width" is the RMS size from the trace of the moments matrix.
    width = numpy.sqrt(0.5 * (xx + yy))

    # OR together all configured bad flags, then the flux/width cuts.
    bad = reduce(lambda x, y: numpy.logical_or(x, sourceCat.get(y)),
                 self.config.badFlags, False)
    bad = numpy.logical_or(bad, flux < self.config.fluxMin)
    bad = numpy.logical_or(bad, numpy.logical_not(numpy.isfinite(width)))
    bad = numpy.logical_or(bad, numpy.logical_not(numpy.isfinite(flux)))
    bad = numpy.logical_or(bad, width < self.config.widthMin)
    bad = numpy.logical_or(bad, width > self.config.widthMax)
    if self.config.fluxMax > 0:
        bad = numpy.logical_or(bad, flux > self.config.fluxMax)
    good = numpy.logical_not(bad)

    if not numpy.any(good):
        raise RuntimeError(
            "No objects passed our cuts for consideration as psf stars")

    mag = -2.5 * numpy.log10(flux[good])
    width = width[good]
    #
    # Look for the maximum in the size histogram, then search upwards for the minimum that separates
    # the initial peak (of, we presume, stars) from the galaxies
    #
    if dumpData:
        # Debug aid: dump (mag, width) arrays to the first free
        # ~/widths-<i>.pkl for offline inspection.
        import os
        import pickle as pickle
        _ii = 0
        while True:
            pickleFile = os.path.expanduser(
                os.path.join("~", "widths-%d.pkl" % _ii))
            if not os.path.exists(pickleFile):
                break
            _ii += 1

        with open(pickleFile, "wb") as fd:
            pickle.dump(mag, fd, -1)
            pickle.dump(width, fd, -1)

    # Initial k-centers clustering of widths into 4 clusters.
    centers, clusterId = _kcenters(
        width, nCluster=4, useMedian=True,
        widthStdAllowed=self.config.widthStdAllowed)

    if display and plotMagSize:
        fig = plot(
            mag, width, centers, clusterId,
            magType=self.config.sourceFluxField.split(".")[-1].title(),
            marker="+", markersize=3, markeredgewidth=None, ltype=':',
            clear=True)
    else:
        fig = None

    # Sigma-clip refinement of the cluster assignment.
    clusterId = _improveCluster(
        width, centers, clusterId,
        nsigma=self.config.nSigmaClip,
        widthStdAllowed=self.config.widthStdAllowed)

    if display and plotMagSize:
        plot(mag, width, centers, clusterId, marker="x", markersize=3,
             markeredgewidth=None, clear=False)

    # Cluster 0 (the narrowest) is taken to be the stellar locus.
    stellar = (clusterId == 0)
    #
    # We know enough to plot, if so requested
    #
    frame = 0

    if fig:
        if display and displayExposure:
            ds9.mtv(exposure.getMaskedImage(), frame=frame,
                    title="PSF candidates")

        global eventHandler
        eventHandler = EventHandler(fig.get_axes()[0], mag, width,
                                    sourceCat.getX()[good],
                                    sourceCat.getY()[good], frames=[frame])

        fig.show()

        # Interactive inspection loop: any key continues, with a few
        # special commands (help / pdb / quit).
        while True:
            try:
                reply = input("continue? [c h(elp) q(uit) p(db)] ").strip()
            except EOFError:
                reply = None
            if not reply:
                reply = "c"

            if reply:
                if reply[0] == "h":
                    print("""\
We cluster the points; red are the stellar candidates and the other colours are other clusters.
Points labelled + are rejects from the cluster (only for cluster 0).

At this prompt, you can continue with almost any key; 'p' enters pdb, and 'h' prints this text

If displayExposure is true, you can put the cursor on a point and hit 'p' to see it in ds9.
""")
                elif reply[0] == "p":
                    import pdb
                    pdb.set_trace()
                elif reply[0] == 'q':
                    sys.exit(1)
                else:
                    break

    if display and displayExposure:
        mi = exposure.getMaskedImage()

        with ds9.Buffering():
            for i, source in enumerate(sourceCat):
                if good[i]:
                    ctype = ds9.GREEN  # star candidate
                else:
                    ctype = ds9.RED  # not star

                ds9.dot("+", source.getX() - mi.getX0(),
                        source.getY() - mi.getY0(),
                        frame=frame, ctype=ctype)

    # Copy the sources flagged as stellar into the output catalog.
    starCat = SourceCatalog(sourceCat.table)
    goodSources = [s for g, s in zip(good, sourceCat) if g]
    for isStellar, source in zip(stellar, goodSources):
        if isStellar:
            starCat.append(source)

    return Struct(
        starCat=starCat,
    )
def _loadAndMatchCatalogs(self, repo, dataIds, matchRadius):
    """Load data from specific visit. Match with reference.

    Parameters
    ----------
    repo : string
        The repository.  This is generally the directory on disk
        that contains the repository and mapper.
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to
        reference.  The `calexp` pixel image is needed for the photometric
        calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching.  Default is 1 arcsecond.

    Returns
    -------
    afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Extend the src schema with SNR/magnitude output columns.
    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magerr',
                                       'PSF magnitude uncertainty'))
    newSchema = mapper.getOutputSchema()

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32,
                                      ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extented source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        try:
            calexpMetadata = butler.get("calexp_md", vId, immediate=True)
        except (FitsError, dafPersist.NoResults) as e:
            # Missing/corrupt calexp: report and move on to the next ID.
            print(e)
            print("Could not open calibrated image file for ", vId)
            print("Skipping %s " % repr(vId))
            continue
        except TypeError as te:
            # DECam images that haven't been properly reformatted
            # can trigger a TypeError because of a residual FITS header
            # LTV2 which is a float instead of the expected integer.
            # This generates an error of the form:
            #
            #  lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
            #
            # See, e.g., DM-2957 for details.
            print(te)
            print("Calibration image header information malformed.")
            print("Skipping %s " % repr(vId))
            continue

        calib = afwImage.Calib(calexpMetadata)

        oldSrc = butler.get('src', vId, immediate=True)
        print(
            len(oldSrc), "sources in ccd %s  visit %s" %
            (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog in the extended schema and copy rows in.
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
            / tmpCat['base_PsfFlux_fluxSigma']
        # CalibNoThrow lets getMagnitude return NaN for non-positive fluxes
        # instead of raising.
        with afwImageUtils.CalibNoThrow():
            _ = calib.getMagnitude(tmpCat['base_PsfFlux_flux'],
                                   tmpCat['base_PsfFlux_fluxSigma'])
            tmpCat['base_PsfFlux_mag'][:] = _[0]
            tmpCat['base_PsfFlux_magerr'][:] = _[1]

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return allMatches
def combineWithForce(meas, force):
    """Combine the meas and forced_src catalogs.

    The output catalog has the full ``meas`` schema plus a ``force_``-prefixed
    copy of selected measurement columns taken from ``force``.

    Parameters
    ----------
    meas : lsst.afw.table.SourceCatalog
        The (unforced) measurement catalog; supplies the base schema and rows.
    force : lsst.afw.table.SourceCatalog
        The forced-photometry catalog; must be row-aligned with ``meas``.

    Returns
    -------
    lsst.afw.table.SourceCatalog
        Combined catalog with ``force_*`` columns filled from ``force``.

    Raises
    ------
    ValueError
        If the two catalogs do not have the same number of rows.
    """
    if len(meas) != len(force):
        # Row-wise copy below requires a strict 1:1 alignment.
        raise ValueError("# Meas and Forced_src catalogs should have "
                         "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Add new fields — one 'force_'-prefixed output field per copied column.
    newSchema.addField('force_deblend_nChild', type=np.int32)
    newSchema.addField('force_base_ClassificationExtendedness_value', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFlux', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFluxErr', type=float)
    newSchema.addField('force_base_PsfFlux_instFlux', type=float)
    newSchema.addField('force_base_PsfFlux_instFluxErr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorrErr', type=float)
    newSchema.addField('force_base_PsfFlux_apCorr', type=float)
    newSchema.addField('force_base_PsfFlux_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_fracDev', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorrErr', type=float)

    # Columns copied from `force` into the matching 'force_' fields above.
    # BUGFIX: the original list repeated 'modelfit_CModel_exp_instFlux' and
    # the exp apCorr pair, so force_modelfit_CModel_exp_instFluxErr,
    # force_modelfit_CModel_apCorr and force_modelfit_CModel_apCorrErr were
    # never filled.  The list now matches the schema fields one-for-one.
    newCols = [
        'deblend_nChild',
        'base_ClassificationExtendedness_value',
        'ext_photometryKron_KronFlux_instFlux',
        'ext_photometryKron_KronFlux_instFluxErr',
        'base_PsfFlux_instFlux',
        'base_PsfFlux_instFluxErr',
        'ext_photometryKron_KronFlux_apCorr',
        'ext_photometryKron_KronFlux_apCorrErr',
        'base_PsfFlux_apCorr',
        'base_PsfFlux_apCorrErr',
        'modelfit_CModel_instFlux',
        'modelfit_CModel_instFluxErr',
        'modelfit_CModel_apCorr',
        'modelfit_CModel_apCorrErr',
        'modelfit_CModel_exp_instFlux',
        'modelfit_CModel_exp_instFluxErr',
        'modelfit_CModel_exp_apCorr',
        'modelfit_CModel_exp_apCorrErr',
        'modelfit_CModel_dev_instFlux',
        'modelfit_CModel_dev_instFluxErr',
        'modelfit_CModel_dev_apCorr',
        'modelfit_CModel_dev_apCorrErr',
        'modelfit_CModel_fracDev',
    ]

    # Preserve any slot/alias definitions from the measurement schema.
    measAlias = meas.schema.getAliasMap()
    newAlias = newSchema.getAliasMap()
    for aliasKey in measAlias.keys():
        newAlias.set(aliasKey, measAlias[aliasKey])

    # Copy the measurement rows, then fill the forced columns in bulk.
    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)

    for key in newCols:
        combSrc['force_' + key][:] = force[key][:]

    return combSrc
def _loadAndMatchCatalogs(self, repo, dataIds, matchRadius,
                          useJointCal=False):
    """Load data from specific visit. Match with reference.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to
        reference.  The `calexp` pixel image is needed for the photometric
        calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching.  Default is 1 arcsecond.
    useJointCal : `bool`, optional
        Use jointcal's photoCalib/wcs outputs instead of the single-frame
        calexp calibration.

    Returns
    -------
    afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Extend the src schema with SNR/magnitude output columns.
    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32,
                                      ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extented source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if useJointCal:
            try:
                photoCalib = butler.get("photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open photometric calibration for ", vId)
                print("Skipping %s " % repr(vId))
                continue
            try:
                md = butler.get("wcs_md", vId)
                wcs = afwImage.makeWcs(md)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open updated WCS for ", vId)
                print("Skipping %s " % repr(vId))
                continue
        else:
            try:
                calexpMetadata = butler.get("calexp_md", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping %s " % repr(vId))
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                #  lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping %s " % repr(vId))
                continue

            calib = afwImage.Calib(calexpMetadata)

        # We don't want to put this above the first "if useJointCal block"
        # because we need to use the first `butler.get` above to quickly
        # catch data IDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Fall back to a plain read when
            # the camera does not support the no-footprints flag.
            oldSrc = butler.get('src', vId)
        print(
            len(oldSrc), "sources in ccd %s  visit %s" %
            (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog in the extended schema and copy rows in.
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
            / tmpCat['base_PsfFlux_fluxSigma']

        if useJointCal:
            # Apply the updated (jointcal) astrometric and photometric
            # solutions.
            for record in tmpCat:
                record.updateCoord(wcs)
            photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux",
                                           "base_PsfFlux")
        else:
            # CalibNoThrow lets getMagnitude return NaN for non-positive
            # fluxes instead of raising.
            with afwImageUtils.CalibNoThrow():
                _ = calib.getMagnitude(tmpCat['base_PsfFlux_flux'],
                                       tmpCat['base_PsfFlux_fluxSigma'])
                tmpCat['base_PsfFlux_mag'][:] = _[0]
                tmpCat['base_PsfFlux_magErr'][:] = _[1]

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return allMatches
def match_catalogs(inputs, photoCalibs, astromCalibs, vIds, matchRadius,
                   apply_external_wcs=False, logger=None):
    """Calibrate and multi-match a set of per-detector source catalogs.

    Parameters
    ----------
    inputs : list of `lsst.afw.table.SourceCatalog`
        One source catalog per (visit, detector).
    photoCalibs : list of `lsst.afw.image.PhotoCalib`
        Photometric calibration for each input catalog.
    astromCalibs : list of WCS objects (or None entries)
        Astrometric calibration for each input catalog.
    vIds : list of dict
        Data IDs with 'visit', 'detector' and 'band' keys, aligned with
        ``inputs``.
    matchRadius : `lsst.geom.Angle`
        Radius for the multi-match.
    apply_external_wcs : `bool`, optional
        If True, update source coordinates with the supplied WCS first.
    logger : optional
        Logger used for per-catalog debug output.

    Returns
    -------
    srcVis : `lsst.afw.table.SourceCatalog`
        All calibrated sources concatenated.
    matchCat : catalog produced by ``MultiMatch.finish``
        Matched sources with group object IDs.
    """
    schema = inputs[0].schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    # Needed because addOutputField(... 'slot_ModelFlux_mag') will add a
    # field with that literal name
    aliasMap = schema.getAliasMap()
    # Possibly not needed since base_GaussianFlux is the default, but this
    # ought to be safe
    modelName = aliasMap['slot_ModelFlux'] if 'slot_ModelFlux' in aliasMap.keys(
    ) else 'base_GaussianFlux'
    mapper.addOutputField(Field[float](f'{modelName}_mag', 'Model magnitude'))
    mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                       'Model magnitude uncertainty'))
    mapper.addOutputField(Field[float](f'{modelName}_snr', 'Model flux snr'))
    # BUGFIX: the e2/psf_e2 field docs previously read 'Ellipticity 1'.
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    mapper.addOutputField(Field[np.int32]('filt', 'filter code'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32,
                                      'detector': np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    # Integer codes for band names so the filter can live in an int column.
    filter_dict = {'u': 1, 'g': 2, 'r': 3, 'i': 4, 'z': 5, 'y': 6,
                   'HSC-U': 1, 'HSC-G': 2, 'HSC-R': 3, 'HSC-I': 4,
                   'HSC-Z': 5, 'HSC-Y': 6}

    # Sort by visit, detector, then filter
    vislist = [v['visit'] for v in vIds]
    ccdlist = [v['detector'] for v in vIds]
    filtlist = [v['band'] for v in vIds]
    tab_vids = Table([vislist, ccdlist, filtlist],
                     names=['vis', 'ccd', 'filt'])
    sortinds = np.argsort(tab_vids, order=('vis', 'ccd', 'filt'))

    for ind in sortinds:
        oldSrc = inputs[ind]
        photoCalib = photoCalibs[ind]
        wcs = astromCalibs[ind]
        vId = vIds[ind]

        if logger:
            logger.debug(
                f"{len(oldSrc)} sources in ccd {vId['detector']}  visit {vId['visit']}"
            )

        # create temporary catalog in the extended schema and copy rows in.
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        filtnum = filter_dict[vId['band']]
        tmpCat['filt'] = np.repeat(filtnum, len(oldSrc))
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if apply_external_wcs and wcs is not None:
            updateSourceCoords(wcs, tmpCat)

        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")
        tmpCat['slot_ModelFlux_snr'][:] = (
            tmpCat['slot_ModelFlux_instFlux']
            / tmpCat['slot_ModelFlux_instFluxErr'])
        photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                       "slot_ModelFlux")

        # Per-source ellipticities for the PSF model and the measured shape.
        _, psf_e1, psf_e2 = ellipticity_from_cat(oldSrc,
                                                 slot_shape='slot_PsfShape')
        _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                   slot_shape='slot_Shape')
        tmpCat['e1'][:] = star_e1
        tmpCat['e2'][:] = star_e2
        tmpCat['psf_e1'][:] = psf_e1
        tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    # I don't think I can persist a group view, so this may need to be
    # called in a subsequent task
    # allMatches = GroupView.build(matchCat)

    return srcVis, matchCat
def _makeDummyCatalog(size):
    """Create a trivial source catalog for testing.

    Parameters
    ----------
    size : `int`
        Number of (empty) records to add.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A minimal-schema catalog containing ``size`` blank records.
    """
    schema = SourceCatalog.Table.makeMinimalSchema()
    catalog = SourceCatalog(schema)
    for _ in range(size):
        catalog.addNew()
    return catalog
def getSchemaCatalogs(self):
    """Return a dict of empty catalogs for each catalog dataset produced
    by this task.
    """
    catalog = SourceCatalog(self.schema)
    table = catalog.getTable()
    table.setMetadata(self.algMetadata)
    return {"icSrc": catalog}
def _loadAndMatchCatalogs(repo, dataIds, matchRadius,
                          doApplyExternalPhotoCalib=False,
                          externalPhotoCalibName=None,
                          doApplyExternalSkyWcs=False,
                          externalSkyWcsName=None,
                          skipTEx=False, skipNonSrd=False):
    """Load data from specific visits and returned a calibrated catalog
    matched with a reference.

    Parameters
    ----------
    repo : `str` or `lsst.daf.persistence.Butler`
        A Butler or a repository URL that can be used to construct one.
    dataIds : list of dict
        List of butler data IDs of Image catalogs to compare to reference.
        The calexp pixel image is needed for the photometric calibration.
    matchRadius : `lsst.geom.Angle`, optional
        Radius for matching. Default is 1 arcsecond.
    doApplyExternalPhotoCalib : bool, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : str, optional
        Type of external `PhotoCalib` to apply.  Currently supported are
        jointcal, fgcm, and fgcm_tract.  Must be set if
        doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : bool, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : str, optional
        Type of external `wcs` to apply.  Currently supported is jointcal.
        Must be set if "doApplyExternalWcs" is True.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    skipNonSrd : `bool`, optional
        Skip any metrics not defined in the LSST SRD; default False.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new calibrated SourceCatalog.
    matches : `lsst.afw.table.GroupView`
        A GroupView of the matched sources.

    Raises
    ------
    RuntimeError:
        Raised if "doApplyExternalPhotoCalib" is True and
        "externalPhotoCalibName" is None, or if "doApplyExternalSkyWcs" is
        True and "externalSkyWcsName" is None.
    """
    # Validate the external-calibration options up front.
    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError(
            "Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True."
        )
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError(
            "Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")

    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    # Extend the src schema with SNR/magnitude/ellipticity output columns.
    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    if not skipNonSrd:
        # Needed because addOutputField(... 'slot_ModelFlux_mag') will add
        # a field with that literal name
        aliasMap = schema.getAliasMap()
        # Possibly not needed since base_GaussianFlux is the default, but
        # this ought to be safe
        modelName = aliasMap[
            'slot_ModelFlux'] if 'slot_ModelFlux' in aliasMap.keys(
            ) else 'base_GaussianFlux'
        mapper.addOutputField(Field[float](f'{modelName}_mag',
                                           'Model magnitude'))
        mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                           'Model magnitude uncertainty'))
        mapper.addOutputField(Field[float](f'{modelName}_snr',
                                           'Model flux snr'))
    # BUGFIX: the e2/psf_e2 field docs previously read 'Ellipticity 1'.
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': np.int32,
                                      ccdKeyName: np.int32},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extented source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if not butler.datasetExists('src', vId):
            print(f'Could not find source catalog for {vId}; skipping.')
            continue

        photoCalib = _loadPhotoCalib(butler, vId,
                                     doApplyExternalPhotoCalib,
                                     externalPhotoCalibName)
        if photoCalib is None:
            continue

        if doApplyExternalSkyWcs:
            wcs = _loadExternalSkyWcs(butler, vId, externalSkyWcsName)
            if wcs is None:
                continue

        # We don't want to put this above the first _loadPhotoCalib call
        # because we need to use the first `butler.get` in there to quickly
        # catch dataIDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc), "sources in ccd %s  visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog in the extended schema and copy rows in.
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if doApplyExternalSkyWcs:
            afwTable.updateSourceCoords(wcs, tmpCat)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux",
                                       "base_PsfFlux")

        if not skipNonSrd:
            tmpCat['slot_ModelFlux_snr'][:] = (
                tmpCat['slot_ModelFlux_instFlux']
                / tmpCat['slot_ModelFlux_instFluxErr'])
            photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                           "slot_ModelFlux")

        if not skipTEx:
            # Per-source ellipticities for the PSF model and measured shape.
            _, psf_e1, psf_e2 = ellipticity_from_cat(
                oldSrc, slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(
                oldSrc, slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches
def getFakeSources(butler, dataId, tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False, footprints=False, radecMatch=None,
                   multiband=False, reffMatch=False, pix=0.168, minRad=None,
                   raCol='RA', decCol='Dec'):
    """Get the list of sources that agree in pixel position with fakes within tol.

    This returns a SourceCatalog of all the matched fake objects. Note that
    there may be duplicates in this list, since ``deblend.nchild`` is not
    checked and only a tolerance match is done, which could include extra
    sources.

    The output can include ``extraCols`` as long as they are one of:
    zeropoint, visit, ccd, thetaNorth, pixelScale.

    If ``includeMissing`` is `True`, then the pipeline looks at the fake
    sources added in the header and includes an entry in the table for
    sources without any measurements; specifically, the 'id' column will
    be 0 for those rows.

    ``radecMatch`` is the fakes table. If it is not `None` (the default),
    an RA/Dec match is done against the input catalog instead of looking
    in the header for where the sources were added.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Butler used to fetch the source catalog, image, and metadata.
    dataId : `dict`
        Data ID; a 'filter' key selects the coadd path, otherwise calexp.
    tol : `float`, optional
        Matching tolerance, in pixels.
    extraCols : sequence of `str`, optional
        Extra columns to copy into the output; must be keys of availExtras.
    includeMissing : `bool`, optional
        Add a row (with id==0) for fakes with no matched source.
    footprints : `bool`, optional
        Unused here; kept for interface compatibility.
    radecMatch : table or `None`, optional
        Fakes table for RA/Dec matching; `None` means match via the header.
    multiband : `bool`, optional
        Unused here; kept for interface compatibility.
    reffMatch, pix, minRad, raCol, decCol :
        Passed through to `getFakeMatchesRaDec`.

    Returns
    -------
    srcList : `lsst.afw.table.SourceCatalog` or `None`
        Matched sources with fake-match bookkeeping columns appended, or
        `None` if the input data could not be read.
    """
    coaddData = "deepCoadd_calexp"
    coaddMeta = "deepCoadd_calexp_md"

    # Optional per-row metadata columns that callers may request via extraCols.
    availExtras = {
        'zeropoint': {
            'type': float,
            'doc': 'zeropoint'
        },
        'visit': {
            'type': int,
            'doc': 'visit id'
        },
        'ccd': {
            'type': int,
            'doc': 'ccd id'
        },
        'thetaNorth': {
            'type': lsst.afw.geom.Angle,
            'doc': 'angle to north'
        },
        'pixelScale': {
            'type': float,
            'doc': 'pixelscale in arcsec/pixel'
        }
    }

    # Warn (but deliberately do not raise) on unrecognized extraCols entries;
    # unknown names are simply never filled below.
    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            # Single-visit (calexp) path.
            sources = butler.get('src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            # Coadd path: combine the meas and forced_src catalogs first.
            meas = butler.get('deepCoadd_meas', dataId,
                              flags=NO_FOOTPRINT, immediate=True)
            force = butler.get('deepCoadd_forced_src', dataId,
                               flags=NO_FOOTPRINT, immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except RuntimeError:
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.getPixelScale().asArcseconds()
        # The 8 lines of code below find the angle to north, first the mid pixel of the calexp is found,
        # then the pixel to sky matrix at this point, the coordinate this gives can then be used to find the
        # linearized sky to pixel matrix which can then be used to find the angle.
        xMid = cal.getWidth() // 2
        yMid = cal.getHeight() // 2
        midPoint = lsst.afw.geom.Point2D(xMid, yMid)
        midCoord = wcs.pixelToSky(midPoint)
        northSkyToPixelMatrix = wcs.linearizeSkyToPixel(
            midCoord, lsst.afw.geom.degrees)
        northSkyToPixelMatrix = northSkyToPixelMatrix.getLinear()
        availExtras['thetaNorth']['value'] = (np.arctan2(
            *tuple(northSkyToPixelMatrix(lsst.afw.geom.Point2D(1.0, 0.0))))
        ) * lsst.afw.geom.radians

    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.getScalar('FLUXMAG0'))
        availExtras['zeropoint']['value'] = zeropoint

    if radecMatch is None:
        # Fake positions come from the exposure header.
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        # Fake positions come from the supplied fakes table (RA/Dec match).
        if minRad is not None:
            print("# The min matching radius is %4.1f pixel" % minRad)
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(sources,
                                                         radecMatch,
                                                         bbox,
                                                         cal.getWcs(),
                                                         tol=tol,
                                                         reffMatch=reffMatch,
                                                         pix=pix,
                                                         minRad=minRad,
                                                         raCol=raCol,
                                                         decCol=decCol)

    # Extend the source schema with fake-match bookkeeping columns.
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=np.int32,
                       doc='id of fake source matched to position')
    newSchema.addField('nMatched', type=np.int32,
                       doc='Number of matched objects')
    newSchema.addField('nPrimary', type=np.int32,
                       doc='Number of unique matched objects')
    newSchema.addField('nNoChild', type=np.int32,
                       doc='Number of matched objects with nchild==0')
    # BUG FIX: field doc previously read 'Radius used form atching obects'.
    newSchema.addField('rMatched', type=float,
                       doc='Radius used for matching objects, in pixel')
    newSchema.addField('fakeOffX', type=float,
                       doc='offset from input fake position in X (pixels)')
    newSchema.addField('fakeOffY', type=float,
                       doc='offset from input fake position in Y (pixels)')
    newSchema.addField('fakeOffR', type=float,
                       doc='offset from input fake position in radius')
    newSchema.addField('fakeClosest', type="Flag",
                       doc='Is this match the closest one?')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    # Reserve one row per match, plus one per unmatched fake if requested.
    srcList.reserve(
        sum([len(s) for s in srcIndex.values()]) +
        (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.getCentroidKey()
    isPrimary = sources.schema.find('detect_isPrimary').getKey()
    nChild = sources.schema.find('force_deblend_nChild').getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum(
            [sources[int(obj)].get(isPrimary) for obj in sindlist])
        nNoChild = np.sum([(sources[int(obj)].get(nChild) == 0)
                           for obj in sindlist])
        if includeMissing and (nMatched == 0):
            # Placeholder row for a fake with no matched source (id == 0).
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
            newRec.set('nMatched', 0)
            newRec.set('rMatched', rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[int(ss)], mapper)
            newRec.set('fakeId', ident)
            newRec.set('nMatched', nMatched)
            newRec.set('nPrimary', nPrimary)
            newRec.set('nNoChild', nNoChild)
            newRec.set('rMatched', rMatched)
            offsetX = (sources[int(ss)].get(centroidKey).getX() -
                       fakeXY[ident][0])
            newRec.set('fakeOffX', offsetX)
            offsetY = (sources[int(ss)].get(centroidKey).getY() -
                       fakeXY[ident][1])
            newRec.set('fakeOffY', offsetY)
            newRec.set('fakeOffR', np.sqrt(offsetX**2.0 + offsetY**2.0))
            if radecMatch:
                # srcClose only exists on the RA/Dec-match path.
                if int(ss) == int(srcClose[ident]):
                    newRec.set('fakeClosest', True)
                else:
                    newRec.set('fakeClosest', False)

    if includeMissing:
        # Deep-copy to make the catalog contiguous after the addNew() calls.
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
def _loadAndMatchCatalogs(repo, dataIds, matchRadius,
                          useJointCal=False, skipTEx=False):
    """Load data from specific visit. Match with reference.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration.
    matchRadius : afwGeom.Angle(), optional
        Radius for matching. Default is 1 arcsecond.
    useJointCal : `bool`, optional
        Use jointcal/meas_mosaic outputs to calibrate positions and fluxes.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).

    Returns
    -------
    catalog_list : afw.table.SourceCatalog
        List of all of the catalogs
    matched_catalog : afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    # Extend the src schema with derived photometry/ellipticity columns.
    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    # BUG FIX: the 'e2' and 'psf_e2' field docs previously said
    # "... Ellipticity 1" (copy-paste error).
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={
                            'visit': np.int32,
                            ccdKeyName: np.int32
                        },
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if useJointCal:
            try:
                photoCalib = butler.get("jointcal_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open photometric calibration for ", vId)
                print("Skipping this dataId.")
                continue
            try:
                wcs = butler.get("jointcal_wcs", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open updated WCS for ", vId)
                print("Skipping this dataId.")
                continue
        else:
            try:
                photoCalib = butler.get("calexp_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping this dataId.")
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                #   lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping this dataId.")
                continue

        # We don't want to put this above the first "if useJointCal block"
        # because we need to use the first `butler.get` above to quickly
        # catch data IDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc), "sources in ccd %s visit %s" %
              (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if useJointCal:
            # Recompute coordinates with the jointcal-updated WCS.
            for record in tmpCat:
                record.updateCoord(wcs)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")

        if not skipTEx:
            _, psf_e1, psf_e2 = ellipticity_from_cat(
                oldSrc, slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                       slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches