Example #1
    def __init__(self, schema, peakSchema=None, **kwargs):
        """!
        Create the task, adding necessary fields to the given schema.

        @param[in,out] schema        Schema object for measurement fields; will be modified in-place.
        @param[in]     peakSchema    Schema of Footprint Peaks that will be passed to the deblender.
                                     Any fields beyond the PeakTable minimal schema will be transferred
                                     to the main source Schema.  If None, no fields will be transferred
                                     from the Peaks.
        @param[in]     **kwargs      Passed to Task.__init__.
        """
        pipeBase.Task.__init__(self, **kwargs)
        peakMinimalSchema = afwDet.PeakTable.makeMinimalSchema()
        if peakSchema is None:
            # In this case, the peakSchemaMapper will transfer nothing, but we'll still have one
            # to simplify downstream code
            self.peakSchemaMapper = afwTable.SchemaMapper(
                peakMinimalSchema, schema)
        else:
            self.peakSchemaMapper = afwTable.SchemaMapper(peakSchema, schema)
            for item in peakSchema:
                if item.key not in peakMinimalSchema:
                    self.peakSchemaMapper.addMapping(item.key, item.field)
                    # Because SchemaMapper makes a copy of the output schema you give its ctor, it isn't
                    # updating this Schema in place.  That's probably a design flaw, but in the meantime,
                    # we'll keep that schema in sync with the peakSchemaMapper.getOutputSchema() manually,
                    # by adding the same fields to both.
                    schema.addField(item.field)
        assert schema == self.peakSchemaMapper.getOutputSchema(), "Logic bug mapping schemas"
        self.addSchemaKeys(schema)
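All of the examples on this page revolve around the same lsst.afw.table.SchemaMapper pattern: copy an input schema, optionally add fields to the output, then transfer records. A minimal, self-contained sketch of that pattern, assuming only that the LSST stack (lsst.afw.table) is importable; the field name demo_instFlux is illustrative, not taken from any example above:

import lsst.afw.table as afwTable

inSchema = afwTable.SourceTable.makeMinimalSchema()
mapper = afwTable.SchemaMapper(inSchema)
mapper.addMinimalSchema(inSchema, True)  # map every input field through
newKey = mapper.editOutputSchema().addField(
    "demo_instFlux", type=float, doc="hypothetical extra column")

inCat = afwTable.SourceCatalog(inSchema)
inCat.addNew()
outCat = afwTable.SourceCatalog(mapper.getOutputSchema())
outCat.extend(inCat, mapper=mapper)  # copies every mapped field
outCat[0].set(newKey, 1.0)  # fill the new column by hand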
Example #2
    def addCalibColumns(self, catalog, dataRef):
        """Add columns with local calibration evaluated at each centroid

        for backwards compatibility with old repos.
        This exists for the purpose of converting old src catalogs
        (which don't have the expected local calib columns) to Source Tables.

        Parameters
        ----------
        catalog: `afwTable.SourceCatalog`
            catalog to which calib columns will be added
        dataRef: `lsst.daf.persistence.ButlerDataRef
            for fetching the calibs from disk.

        Returns
        -------
        newCat:  `afwTable.SourceCatalog`
            Source Catalog with requested local calib columns
        """
        measureConfig = SingleFrameMeasurementTask.ConfigClass()
        measureConfig.doReplaceWithNoise = False

        # Just need the WCS or the PhotoCalib attached to an exposure
        exposure = dataRef.get('calexp_sub',
                               bbox=lsst.geom.Box2I(lsst.geom.Point2I(0, 0),
                                                    lsst.geom.Point2I(0, 0)))

        mapper = afwTable.SchemaMapper(catalog.schema)
        mapper.addMinimalSchema(catalog.schema, True)
        schema = mapper.getOutputSchema()

        exposureIdInfo = dataRef.get("expIdInfo")
        measureConfig.plugins.names = []
        if self.config.doApplyExternalSkyWcs:
            plugin = 'base_LocalWcs'
            if plugin in schema:
                raise RuntimeError(
                    f"{plugin} already in src catalog. Set doApplyExternalSkyWcs=False"
                )
            else:
                measureConfig.plugins.names.add(plugin)

        if self.config.doApplyExternalPhotoCalib:
            plugin = 'base_LocalPhotoCalib'
            if plugin in schema:
                raise RuntimeError(
                    f"{plugin} already in src catalog. Set doApplyExternalPhotoCalib=False"
                )
            else:
                measureConfig.plugins.names.add(plugin)

        measurement = SingleFrameMeasurementTask(config=measureConfig,
                                                 schema=schema)
        newCat = afwTable.SourceCatalog(schema)
        newCat.extend(catalog, mapper=mapper)
        measurement.run(measCat=newCat,
                        exposure=exposure,
                        exposureId=exposureIdInfo.expId)
        return newCat
Example #3
    def measureForcedSources(self, diaSources, science, wcs):
        """Perform forced measurement of the diaSources on the science image.

        Parameters
        ----------
        diaSources : `lsst.afw.table.SourceCatalog`
            The catalog of detected sources.
        science : `lsst.afw.image.ExposureF`
            Science exposure that the template was subtracted from.
        wcs : `lsst.afw.geom.SkyWcs`
            Coordinate system definition (wcs) for the exposure.
        """
        # Run forced psf photometry on the PVI at the diaSource locations.
        # Copy the measured flux and error into the diaSource.
        forcedSources = self.forcedMeasurement.generateMeasCat(
            science, diaSources, wcs)
        self.forcedMeasurement.run(forcedSources, science, diaSources, wcs)
        mapper = afwTable.SchemaMapper(forcedSources.schema, diaSources.schema)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_instFlux")[0],
                          "ip_diffim_forced_PsfFlux_instFlux", True)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_instFluxErr")[0],
                          "ip_diffim_forced_PsfFlux_instFluxErr", True)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_area")[0],
                          "ip_diffim_forced_PsfFlux_area", True)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_flag")[0],
                          "ip_diffim_forced_PsfFlux_flag", True)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_flag_noGoodPixels")[0],
                          "ip_diffim_forced_PsfFlux_flag_noGoodPixels", True)
        mapper.addMapping(forcedSources.schema.find("base_PsfFlux_flag_edge")[0],
                          "ip_diffim_forced_PsfFlux_flag_edge", True)
        for diaSource, forcedSource in zip(diaSources, forcedSources):
            diaSource.assign(forcedSource, mapper)
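Where Example #3 copies measurements record by record, the key calls are the two-schema SchemaMapper constructor and record.assign. A hedged sketch of just that mechanism, under the assumption that the destination field already exists in the output schema; all field names here are illustrative:

import lsst.afw.table as afwTable

srcSchema = afwTable.SourceTable.makeMinimalSchema()
fluxKey = srcSchema.addField("base_PsfFlux_instFlux", type=float, doc="flux")

dstSchema = afwTable.SourceTable.makeMinimalSchema()
dstSchema.addField("forced_instFlux", type=float, doc="copied flux")

# Two-schema ctor: the output schema is fixed in advance; the trailing True
# maps onto the existing output field instead of appending a new one.
mapper = afwTable.SchemaMapper(srcSchema, dstSchema)
mapper.addMapping(fluxKey, "forced_instFlux", True)

srcCat = afwTable.SourceCatalog(srcSchema)
src = srcCat.addNew()
src.set(fluxKey, 42.0)

dstCat = afwTable.SourceCatalog(dstSchema)
dst = dstCat.addNew()
dst.assign(src, mapper)  # copies only the mapped fields into dst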
Example #4
def make_refcat(ra, dec):
    """
    Make a reference catalog for forced photometry

    Parameters
    ----------
    ra : sequence or array
        Right Ascension in decimal degrees
    dec : sequence or array
        Declination in decimal degrees

    Returns
    -------
    src_cat : lsst.afw.table.tableLib.SourceCatalog
        Source catalog for the forced photometry task
    """
    schema = out_butler.get('src_schema', immediate=True).schema
    mapper = afwTable.SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    newSchema = mapper.getOutputSchema()
    src_cat = afwTable.SourceCatalog(newSchema)
    for row in zip(ra, dec):
        record = src_cat.addNew()
        record.set('coord_ra', Angle(row[0] * degrees))
        record.set('coord_dec', Angle(row[1] * degrees))
    return src_cat
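A hypothetical call, assuming out_butler has already been set up: src_cat = make_refcat([150.10, 150.12], [2.18, 2.20]).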
Example #5
    def run(self, dataRef):
        calexp = dataRef.get("calexp")
        psf = calexp.getPsf()
        sources = dataRef.get("src")

        mapper = afwTable.SchemaMapper(sources.getSchema())
        # map all the existing fields
        mapper.addMinimalSchema(sources.getSchema(), True)
        schema = mapper.getOutputSchema()

        # It would be better for the schema-populating code to not be in
        # the SourceDeblendTask constructor!
        self.makeSubtask("deblend", schema=schema)

        outsources = afwTable.SourceCatalog(schema)
        outsources.reserve(2 * len(sources))
        outsources.extend(sources, mapper=mapper)
        sources = outsources
        print(len(sources), 'sources before deblending')

        self.deblend.run(calexp, sources)
        print(len(sources), 'sources after deblending')

        fn = 'deblended.fits'
        print('Writing sources...')
        sources.writeFits(fn)
        print('Wrote sources to', fn)

        fn = 'calexp.fits'
        calexp.writeFits(fn)
        print('Wrote calexp to', fn)

        fn = 'psf.fits'
        psf.writeFits(fn)
        print('Wrote PSF to', fn)
Example #6
    def __init__(self, initInputs, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)
        schema = initInputs["inputSchema"].schema
        self.peakSchema = initInputs["peakSchema"].schema
        self.schemaMapper = afwTable.SchemaMapper(schema)
        self.schemaMapper.addMinimalSchema(schema)
        self.schema = self.schemaMapper.getOutputSchema()
Example #7
    def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
        """!
        Initialize the task.

        @param[in] butler  a butler used to read the input schema from disk, if schema is None
        @param[in] schema  the schema of the detection catalogs used as input to this one
        @param[in] initInputs  dictionary of initialization inputs; if not None, the input
            schema is taken from initInputs['inputSchema'] rather than from butler or schema

        The task will set its own self.schema attribute to the schema of the output merged catalog.
        """
        super().__init__(**kwargs)

        if initInputs is not None:
            inputSchema = initInputs['inputSchema'].schema
        else:
            inputSchema = self.getInputSchema(butler=butler, schema=schema)
        self.schemaMapper = afwTable.SchemaMapper(inputSchema, True)
        self.schemaMapper.addMinimalSchema(inputSchema, True)
        self.instFluxKey = inputSchema.find(self.config.snName +
                                            "_instFlux").getKey()
        self.instFluxErrKey = inputSchema.find(self.config.snName +
                                               "_instFluxErr").getKey()
        self.fluxFlagKey = inputSchema.find(self.config.snName +
                                            "_flag").getKey()

        self.flagKeys = {}
        for band in self.config.priorityList:
            short = getShortFilterName(band)
            outputKey = self.schemaMapper.editOutputSchema().addField(
                "merge_measurement_%s" % short,
                type="Flag",
                doc="Flag field set if the measurements here are from the %s filter" % band)
            peakKey = inputSchema.find("merge_peak_%s" % short).key
            footprintKey = inputSchema.find("merge_footprint_%s" % short).key
            self.flagKeys[band] = pipeBase.Struct(peak=peakKey,
                                                  footprint=footprintKey,
                                                  output=outputKey)
        self.schema = self.schemaMapper.getOutputSchema()

        self.pseudoFilterKeys = []
        for filt in self.config.pseudoFilterList:
            try:
                self.pseudoFilterKeys.append(
                    self.schema.find("merge_peak_%s" % filt).getKey())
            except Exception as e:
                self.log.warn(
                    "merge_peak is not set for pseudo-filter %s: %s" %
                    (filt, e))

        self.badFlags = {}
        for flag in self.config.flags:
            try:
                self.badFlags[flag] = self.schema.find(flag).getKey()
            except KeyError as exc:
                self.log.warn("Can't find flag %s in schema: %s" % (
                    flag,
                    exc,
                ))
        self.outputSchema = afwTable.SourceCatalog(self.schema)
Example #8
    def __init__(self, measConfig, inputSchema, outputDataset, *args,
                 **kwargs):
        """!Initialize TransformTask.

        @param[in] measConfig      Configuration for the measurement task which
                                   produced the measurements being transformed.
        @param[in] inputSchema     The schema of the input catalog.
        @param[in] outputDataset   The butler dataset type of the output catalog.
        @param[in] *args           Passed through to pipeBase.Task.__init__()
        @param[in] **kwargs        Passed through to pipeBase.Task.__init__()
        """
        pipeBase.Task.__init__(self, *args, **kwargs)

        # This task can be used to generate multiple different output dataset types. We
        # need to be able to specify the output type together with its schema.
        self.outputDataset = outputDataset

        # Define a mapper and add the basic fields to be copied.
        self.mapper = afwTable.SchemaMapper(inputSchema)
        for field in self.config.copyFields:
            self.mapper.addMapping(inputSchema.find(field).key)

        # Build a list of all transforms that will be applied to the input. We
        # will iterate over this in run().
        self.transforms = []
        for name in measConfig.plugins.names:
            config = measConfig.plugins.get(name)
            transformClass = measConfig.plugins.registry.get(
                name).PluginClass.getTransformClass()
            self.transforms.append(transformClass(config, name, self.mapper))
Example #9
    def __init__(self, schema, peakSchema=None, **kwargs):
        """Create the task, adding necessary fields to the given schema.

        Parameters
        ----------
        schema : `lsst.afw.table.schema.schema.Schema`
            Schema object for measurement fields; will be modified in-place.
        peakSchema : `lsst.afw.table.schema.schema.Schema`
            Schema of Footprint Peaks that will be passed to the deblender.
            Any fields beyond the PeakTable minimal schema will be transferred
            to the main source Schema.  If None, no fields will be transferred
            from the Peaks.
        filters : list of str
            Names of the filters used for the exposures. This is needed to
            store the SED as a field.
        **kwargs
            Passed to Task.__init__.
        """
        pipeBase.Task.__init__(self, **kwargs)

        peakMinimalSchema = afwDet.PeakTable.makeMinimalSchema()
        if peakSchema is None:
            # In this case, the peakSchemaMapper will transfer nothing, but
            # we'll still have one to simplify downstream code.
            self.peakSchemaMapper = afwTable.SchemaMapper(
                peakMinimalSchema, schema)
        else:
            self.peakSchemaMapper = afwTable.SchemaMapper(peakSchema, schema)
            for item in peakSchema:
                if item.key not in peakMinimalSchema:
                    self.peakSchemaMapper.addMapping(item.key, item.field)
                    # Because SchemaMapper makes a copy of the output schema
                    # you give its ctor, it isn't updating this Schema in
                    # place. That's probably a design flaw, but in the
                    # meantime, we'll keep that schema in sync with the
                    # peakSchemaMapper.getOutputSchema() manually, by adding
                    # the same fields to both.
                    schema.addField(item.field)
            assert schema == self.peakSchemaMapper.getOutputSchema(), "Logic bug mapping schemas"
        self._addSchemaKeys(schema)
        self.schema = schema
        self.toCopyFromParent = [
            item.key for item in self.schema
            if item.field.getName().startswith("merge_footprint")
        ]
Example #10
    def _formatCatalog(self, fgcmStarCat, offsets, bands):
        """
        Turn an FGCM-formatted star catalog into a SimpleCatalog suitable for
        use as a reference catalog, applying zeropoint offsets.

        Parameters
        ----------
        fgcmStarCat : `lsst.afw.table.SimpleCatalog`
            SimpleCatalog as output by fgcmcal
        offsets : `list` with len(self.bands) entries
            Zeropoint offsets to apply
        bands : `list` [`str`]
            List of band names from FGCM output

        Returns
        -------
        formattedCat: `lsst.afw.table.SimpleCatalog`
           SimpleCatalog suitable for using as a reference catalog
        """

        sourceMapper = afwTable.SchemaMapper(fgcmStarCat.schema)
        minSchema = LoadIndexedReferenceObjectsTask.makeMinimalSchema(
            bands, addCentroid=False, addIsResolved=True, coordErrDim=0)
        sourceMapper.addMinimalSchema(minSchema)
        for band in bands:
            sourceMapper.editOutputSchema().addField('%s_nGood' % (band),
                                                     type=np.int32)
            sourceMapper.editOutputSchema().addField('%s_nTotal' % (band),
                                                     type=np.int32)
            sourceMapper.editOutputSchema().addField('%s_nPsfCandidate' % (band),
                                                     type=np.int32)

        formattedCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
        formattedCat.reserve(len(fgcmStarCat))
        formattedCat.extend(fgcmStarCat, mapper=sourceMapper)

        # Note that we don't have to set `resolved` because the default is False

        for b, band in enumerate(bands):
            mag = fgcmStarCat['mag_std_noabs'][:, b].astype(
                np.float64) + offsets[b]
            # We want fluxes in nJy from calibrated AB magnitudes
            # (after applying offset).  Updated after RFC-549 and RFC-575.
            flux = (mag * units.ABmag).to_value(units.nJy)
            fluxErr = ((np.log(10.) / 2.5) * flux
                       * fgcmStarCat['magErr_std'][:, b].astype(np.float64))

            formattedCat['%s_flux' % (band)][:] = flux
            formattedCat['%s_fluxErr' % (band)][:] = fluxErr
            formattedCat['%s_nGood' % (band)][:] = fgcmStarCat['ngood'][:, b]
            formattedCat['%s_nTotal' % (band)][:] = fgcmStarCat['ntotal'][:, b]
            formattedCat['%s_nPsfCandidate' % (band)][:] = fgcmStarCat['npsfcand'][:, b]

        addRefCatMetadata(formattedCat)

        return formattedCat
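For reference, the error propagation in Example #10 follows from flux ∝ 10^(−0.4·mag): differentiating gives fluxErr = (ln 10 / 2.5) · flux · magErr, which is exactly the factor applied to magErr_std above.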
Example #11
    def __init__(self, inputSchema, outputSchema, **kwargs):
        pipeBase.Task.__init__(self, **kwargs)
        self.inputSchema = inputSchema
        self.outputSchema = outputSchema

        self.mapper = afwTable.SchemaMapper(inputSchema, outputSchema)

        for inputName, outputName in self.config.copyColumns.items():
            self.mapper.addMapping(
                self.inputSchema.find(inputName).key, outputName, True)
Example #12
    def _performTransform(self, transformClass, inCat, doExtend=True):
        """Operate on inCat with a transform of class transformClass"""
        mapper = afwTable.SchemaMapper(inCat.schema)
        config = SillyCentroidConfig()
        transform = transformClass(config, self.pluginName, mapper)
        outCat = afwTable.BaseCatalog(mapper.getOutputSchema())
        if doExtend:
            outCat.extend(inCat, mapper=mapper)
        transform(inCat, outCat, makeWcs(), afwImage.Calib())
        return outCat
Example #13
def run():
    exposure, srcCat = loadData()
    schema = srcCat.getSchema()
    #
    # Create the astrometry task
    #
    config = AstrometryTask.ConfigClass()
    config.refObjLoader.filterMap = {"_unknown_": "r"}
    config.matcher.sourceFluxType = "Psf"  # sample catalog does not contain aperture flux
    aTask = AstrometryTask(config=config)
    #
    # And the photometry Task
    #
    config = PhotoCalTask.ConfigClass()
    config.applyColorTerms = False  # we don't have any available, so this suppresses a warning
    pTask = PhotoCalTask(config=config, schema=schema)
    #
    # The tasks may have added extra elements to the schema (e.g. AstrometryTask's centroidKey to
    # handle distortion; photometryTask's config.outputField).  If this is so, we need to add
    # these columns to the Source table.
    #
    # We wouldn't need to do this if we created the schema prior to measuring the exposure,
    # but in this case we read the sources from disk
    #
    if schema != srcCat.getSchema():  # the tasks added fields
        print("Adding columns to the source catalogue")
        cat = afwTable.SourceCatalog(schema)
        cat.table.defineCentroid(srcCat.table.getCentroidDefinition())
        cat.table.definePsfFlux(srcCat.table.getPsfFluxDefinition())

        scm = afwTable.SchemaMapper(srcCat.getSchema(), schema)
        for schEl in srcCat.getSchema():
            scm.addMapping(schEl.getKey(), True)

        cat.extend(srcCat, True, scm)  # copy srcCat to cat, adding new columns

        srcCat = cat
        del cat
    #
    # Process the data
    #
    matches = aTask.run(exposure, srcCat).matches
    result = pTask.run(exposure, matches)

    calib = result.calib
    fm0, fm0Err = calib.getFluxMag0()

    print("Used %d calibration sources out of %d matches" %
          (len(result.matches), len(matches)))

    delta = result.arrays.refMag - result.arrays.srcMag
    q25, q75 = np.percentile(delta, [25, 75])
    print("RMS error is %.3fmmsg (robust %.3f, Calib says %.3f)" %
          (np.std(delta), 0.741 *
           (q75 - q25), 2.5 / np.log(10) * fm0Err / fm0))
Example #14
    def __init__(self, icSourceSchema=None, **kwargs):
        """!Construct a CalibrateTask

        @param[in] icSourceSchema  schema for icSource catalog, or None.
            If measuring aperture correction and the task detectAndMeasure cannot determine
            its own suitable candidates, then this argument must be specified.
            See also config field `icSourceFieldsToCopy`.
        @param[in,out] kwargs  other keyword arguments for lsst.pipe.base.CmdLineTask
        """
        print " \n ******* In calib, init function "  #mg
        pipeBase.Task.__init__(self, **kwargs)

        if icSourceSchema is not None:
            # use a schema mapper to avoid copying each field separately
            self.schemaMapper = afwTable.SchemaMapper(icSourceSchema)
            self.schemaMapper.addMinimalSchema(
                afwTable.SourceTable.makeMinimalSchema(), False)

            # Add fields to copy from an icSource catalog
            # and a field to indicate that the source matched a source in that catalog
            # If any fields are missing then raise an exception, but first find all missing fields
            # in order to make the error message more useful.
            self.calibSourceKey = self.schemaMapper.addOutputField(
                afwTable.Field["Flag"]("calib_detected",
                                       "Source was detected as an icSource"))
            missingFieldNames = []
            for fieldName in self.config.icSourceFieldsToCopy:
                try:
                    schemaItem = icSourceSchema.find(fieldName)
                except Exception:
                    missingFieldNames.append(fieldName)
                else:
                    # field found; if addMapping fails then raise an exception
                    self.schemaMapper.addMapping(schemaItem.getKey())

            if missingFieldNames:
                raise RuntimeError(
                    "icSourceSchema is missing fields %s specified in icSourceFieldsToCopy"
                    % (missingFieldNames, ))

            # produce a temporary schema to pass to the subtasks; finalize it later
            self.schema = self.schemaMapper.editOutputSchema()
        else:
            self.schemaMapper = None
            self.schema = afwTable.SourceTable.makeMinimalSchema()

        self.makeSubtask("detectAndMeasure", schema=self.schema)
        if self.config.doAstrometry or self.config.doPhotoCal:
            self.makeSubtask("astrometry", schema=self.schema)
        if self.config.doPhotoCal:
            self.makeSubtask("photoCal", schema=self.schema)

        if self.schemaMapper is not None:
            # finalize the schema
            self.schema = self.schemaMapper.getOutputSchema()
Example #15
def applyCalib(catalog, calib, hscRun=None):
    """Convert all fluxes in a catalog to magnitudes

    The fluxes are converted in-place, so that the "_flux*" fields are now
    really magnitudes.
    """
    fluxKeys, errKeys = getFluxKeys(catalog.schema, hscRun=hscRun)
    mapper = afwTable.SchemaMapper(catalog.schema, True)
    for item in catalog.schema:
        name = item.field.getName()
        if name in fluxKeys:
            continue
        mapper.addMapping(item.key)
    aliasMap = catalog.schema.getAliasMap()

    newFluxKeys = {}
    newErrKeys = {}
    for name in fluxKeys:
        fluxField = catalog.schema.find(name).field
        newName = name.replace("flux", "mag")
        newField = fluxField.__class__(newName, "Calibrated magnitude from %s (%s)" %
                                       (fluxField.getName(), fluxField.getDoc()), "mag")
        newFluxKeys[newName] = mapper.addMapping(fluxKeys[name], newField)

        sigmaName = "Sigma"
        if hscRun is not None:
            sigmaName = "_err"

        if name + sigmaName in errKeys:
            errField = catalog.schema.find(name + sigmaName).field
            newErrField = errField.__class__(newName + sigmaName,
                                             "Calibrated magnitude error from %s (%s)" %
                                             (errField.getName(), errField.getDoc()), "mag")
            newErrKeys[newName] = mapper.addMapping(errKeys[name + sigmaName], newErrField)
        aliasMap.set(name, newName)
        aliasMap.set(name + sigmaName, newName + sigmaName)

    calib.setThrowOnNegativeFlux(False)

    newCatalog = afwTable.SourceCatalog(mapper.getOutputSchema())
    newCatalog.extend(catalog, mapper=mapper)

    for name, key in newFluxKeys.items():
        flux = newCatalog[key]
        if name in newErrKeys:
            fluxErr = newCatalog[newErrKeys[name]]
            magArray = numpy.array([calib.getMagnitude(f, e) for f, e in zip(flux, fluxErr)])
            mag = magArray[:, 0]
            fluxErr[:] = magArray[:, 1]
        else:
            mag = numpy.array([calib.getMagnitude(f) for f in flux])
        flux[:] = mag

    return newCatalog
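Both this applyCalib and the PhotoCalib-based variant in Example #19 perform the standard conversion mag = −2.5·log10(flux / fluxMag0); here the arithmetic is delegated to Calib.getMagnitude, which, when also given a flux error, returns the propagated magnitude error as the second element.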
Example #16
    def loadSkyCircle(self, ctrCoord, radius, filterName=None):
        """!Load reference objects that overlap a circular sky region

        @param[in] ctrCoord  center of search region (an lsst.afw.geom.Coord)
        @param[in] radius  radius of search region (an lsst.afw.geom.Angle)
        @param[in] filterName  name of filter, or None for the default filter;
            used for flux values in case we have flux limits (which are not yet implemented)

        @return an lsst.pipe.base.Struct containing:
        - refCat a catalog of reference objects with the
            \link meas_algorithms_loadReferenceObjects_Schema standard schema \endlink
            as documented in LoadReferenceObjects, including photometric, resolved and variable;
            hasCentroid is False for all objects.
        - fluxField = name of flux field for specified filterName.  None if refCat is None.
        """
        id_list, boundary_mask = self.indexer.get_pixel_ids(ctrCoord, radius)
        shards = self.get_shards(id_list)
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.make_data_id(
                                     'master_schema', self.ref_dataset_name),
                                 immediate=True)
        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema,
                                    filterName=filterName)
        for shard, is_on_boundary in zip(shards, boundary_mask):
            if shard is None:
                continue
            if is_on_boundary:
                refCat.extend(self._trim_to_circle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        # make sure catalog is contiguous
        if not refCat.isContiguous():
            refCat = refCat.copy()

        # add and initialize centroid and hasCentroid fields (these are added
        # after loading to avoid wasting space in the saved catalogs)
        # the new fields are automatically initialized to (nan, nan) and False
        # so no need to set them explicitly
        mapper = afwTable.SchemaMapper(refCat.schema, True)
        mapper.addMinimalSchema(refCat.schema, True)
        mapper.editOutputSchema().addField("centroid_x", type=float)
        mapper.editOutputSchema().addField("centroid_y", type=float)
        mapper.editOutputSchema().addField("hasCentroid", type="Flag")
        expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
        expandedCat.extend(refCat, mapper=mapper)
        del refCat  # avoid accidentally returning the unexpanded reference catalog

        # return reference catalog
        return pipeBase.Struct(
            refCat=expandedCat,
            fluxField=fluxField,
        )
Example #17
    def setPeakSignificance(self, exposure, footprints, threshold, negative=False):
        """Set the significance of each detected peak to the pixel value divided
        by the appropriate standard-deviation for ``config.thresholdType``.

        Only sets significance for "stdev" and "pixel_stdev" thresholdTypes;
        we leave it undefined for "value" and "variance" as it does not have a
        well-defined meaning in those cases.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure that footprints were detected on, likely the convolved,
            local background-subtracted image.
        footprints : `lsst.afw.detection.FootprintSet`
            Footprints detected on the image.
        threshold : `lsst.afw.detection.Threshold`
            Threshold used to find footprints.
        negative : `bool`, optional
            Are we calculating for negative sources?
        """
        if footprints is None or footprints.getFootprints() == []:
            return footprints
        polarity = -1 if negative else 1

        # All incoming footprints have the same schema.
        mapper = afwTable.SchemaMapper(footprints.getFootprints()[0].peaks.schema)
        mapper.addMinimalSchema(footprints.getFootprints()[0].peaks.schema)
        mapper.addOutputField("significance", type=float,
                              doc="Ratio of peak value to configured standard deviation.")

        # Copy the old peaks to the new ones with a significance field.
        # Do this independent of the threshold type, so we always have a
        # significance field.
        newFootprints = afwDet.FootprintSet(footprints)
        for old, new in zip(footprints.getFootprints(), newFootprints.getFootprints()):
            newPeaks = afwDet.PeakCatalog(mapper.getOutputSchema())
            newPeaks.extend(old.peaks, mapper=mapper)
            new.getPeaks().clear()
            new.setPeakCatalog(newPeaks)

        # Compute the significance values.
        if self.config.thresholdType == "pixel_stdev":
            for footprint in newFootprints.getFootprints():
                footprint.updatePeakSignificance(exposure.variance, polarity)
        elif self.config.thresholdType == "stdev":
            sigma = threshold.getValue() / self.config.thresholdValue
            for footprint in newFootprints.getFootprints():
                footprint.updatePeakSignificance(polarity*sigma)
        else:
            for footprint in newFootprints.getFootprints():
                for peak in footprint.peaks:
                    peak["significance"] = 0

        return newFootprints
Example #18
    def testApplyCppTransform(self):
        """Test that we can apply a simple C++ transform"""
        inCat = self._generateCatalog()
        sillyControl = testLib.SillyCentroidControl()
        mapper = afwTable.SchemaMapper(inCat.schema)
        sillyTransform = testLib.SillyTransform(sillyControl, self.pluginName, mapper)
        outCat = afwTable.BaseCatalog(mapper.getOutputSchema())
        outCat.extend(inCat, mapper=mapper)
        self.assertEqual(len(inCat), len(outCat))
        sillyTransform(inCat, outCat, makeWcs(), afwImage.Calib())
        self._checkSillyOutputs(inCat, outCat)
Example #19
def applyCalib(catalog, photoCalib, hscRun=None):
    """Convert all fluxes in a catalog to magnitudes

    The fluxes are converted in-place, so that the "_instFlux*" fields are
    now really magnitudes.
    """
    fluxKeys, errKeys = getFluxKeys(catalog.schema, hscRun=hscRun)
    mapper = afwTable.SchemaMapper(catalog.schema, True)
    for item in catalog.schema:
        name = item.field.getName()
        if name in fluxKeys:
            continue
        mapper.addMapping(item.key)
    aliasMap = catalog.schema.getAliasMap()

    newFluxKeys = {}
    newErrKeys = {}
    for name in fluxKeys:
        fluxField = catalog.schema.find(name).field
        newName = name.replace("instFlux", "mag")
        newField = fluxField.__class__(
            newName, "Calibrated magnitude from %s (%s)" %
            (fluxField.getName(), fluxField.getDoc()), "mag")
        newFluxKeys[newName] = mapper.addMapping(fluxKeys[name], newField)

        errName = "Err"
        if hscRun is not None:
            errName = "_err"

        if name + errName in errKeys:
            errField = catalog.schema.find(name + errName).field
            newErrField = errField.__class__(
                newName + errName, "Calibrated magnitude error from %s (%s)" %
                (errField.getName(), errField.getDoc()), "mag")
            newErrKeys[newName] = mapper.addMapping(errKeys[name + errName],
                                                    newErrField)
        aliasMap.set(name, newName)
        aliasMap.set(name + errName, newName + errName)

    newCatalog = afwTable.SourceCatalog(mapper.getOutputSchema())
    newCatalog.extend(catalog, mapper=mapper)

    for name, key in newFluxKeys.items():
        flux = newCatalog[key]
        if name in newErrKeys:
            # name[:-len("_mag")] recovers the plugin name; str.strip would
            # remove individual characters, not the "_mag" suffix
            result = photoCalib.instFluxToMagnitude(newCatalog,
                                                    name[:-len("_mag")])
            flux[:] = result[:, 0]
            newCatalog[newErrKeys[name]] = result[:, 1]
        else:
            flux[:] = numpy.array(
                [photoCalib.instFluxToMagnitude(f) for f in flux])

    return newCatalog
Example #20
def applyMosaicResultsCatalog(dataRef, catalog, addCorrection=True):
    """!Apply the results of meas_mosaic to a source catalog

    The coordinates and all fluxes are updated in-place with the meas_mosaic solution.

    This assumes that the mosaic solution exists; an exception will be raised
    in the event that it does not.
    """
    ffp = getFluxFitParams(dataRef)
    calexp_md = dataRef.get("calexp_md", immediate=True)
    hscRun = mosaicUtils.checkHscStack(calexp_md)
    if hscRun is None:
        detector = dataRef.get("camera")[dataRef.dataId["ccd"]]
        nQuarter = detector.getOrientation().getNQuarter()
        if nQuarter % 4 != 0:
            dimensions = dataRef.get("calexp_bbox").getDimensions()
            catalog = mosaicUtils.rotatePixelCoords(catalog, dimensions.getX(),
                                                    dimensions.getY(),
                                                    nQuarter)
    xx, yy = catalog.getX(), catalog.getY()
    corr = numpy.power(10.0, -0.4 * ffp.ffp.eval(xx, yy)) * calculateJacobian(
        ffp.wcs, xx, yy)

    if addCorrection:
        mapper = afwTable.SchemaMapper(catalog.schema, True)
        for s in catalog.schema:
            mapper.addMapping(s.key)
        corrField = afwTable.Field[float](
            "mosaic_corr", "Magnitude correction from meas_mosaic")
        corrKey = mapper.addOutputField(corrField)
        outCatalog = type(catalog)(mapper.getOutputSchema())
        outCatalog.extend(catalog, mapper=mapper)
        outCatalog[corrKey][:] = corr
        catalog = outCatalog

    fluxKeys, errKeys = getFluxKeys(catalog.schema, hscRun=hscRun)
    for name, key in list(fluxKeys.items()) + list(errKeys.items()):
        # Note this skips correcting the aperture fluxes in HSC processed data, but that's ok because
        # we are using the flux_sinc as our comparison to base_CircularApertureFlux_12_0_flux
        if key.subfields is None:
            catalog[key][:] *= corr

    # Now rotate them back to the LSST coord system
    if hscRun is None:
        if nQuarter % 4 != 0:
            catalog = mosaicUtils.rotatePixelCoordsBack(
                catalog, dimensions.getX(), dimensions.getY(), nQuarter)

    wcs = getWcs(dataRef)
    for rec in catalog:
        rec.updateCoord(wcs)

    return Struct(catalog=catalog, wcs=wcs, ffp=ffp)
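The corr array here converts the meas_mosaic flux-fit solution, a magnitude offset Δm evaluated at each position, into a multiplicative flux factor 10^(−0.4·Δm), multiplied by the Jacobian of the WCS to account for the varying pixel area.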
Example #21
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None):
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(
            ctrCoord, radius)
        shards = self.getShards(shardIdList)
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId(
                                     'master_schema', self.ref_dataset_name),
                                 immediate=True)
        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema,
                                    filterName=filterName)
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        if epoch is not None and "pm_ra" in refCat.schema:
            # check for a catalog in a non-standard format
            if isinstance(refCat.schema["pm_ra"].asKey(),
                          lsst.afw.table.KeyAngle):
                self.applyProperMotions(refCat, epoch)
            else:
                self.log.warn(
                    "Catalog pm_ra field is not an Angle; not applying proper motion"
                )

        # add and initialize centroid and hasCentroid fields (these are
        # added after loading to avoid wasting space in the saved catalogs)
        # the new fields are automatically initialized to (nan, nan) and
        # False so no need to set them explicitly
        mapper = afwTable.SchemaMapper(refCat.schema, True)
        mapper.addMinimalSchema(refCat.schema, True)
        mapper.editOutputSchema().addField("centroid_x", type=float)
        mapper.editOutputSchema().addField("centroid_y", type=float)
        mapper.editOutputSchema().addField("hasCentroid", type="Flag")
        expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
        expandedCat.extend(refCat, mapper=mapper)
        del refCat  # avoid accidentally returning the unexpanded ref cat

        # make sure catalog is contiguous
        if not expandedCat.isContiguous():
            expandedCat = expandedCat.copy(True)

        # return reference catalog
        return pipeBase.Struct(
            refCat=expandedCat,
            fluxField=fluxField,
        )
Example #22
    def run(self, dataRef):
        self.log.info("Processing %s" % (dataRef.dataId))
        calexp = dataRef.get('calexp')
        srcs = dataRef.get('src')
        print('Calexp:', calexp)
        print('srcs:', srcs)

        ## FIXME -- this whole mapping business is very fragile -- it
        ## seems to fail, eg, if you don't set "-c
        ## doMeasurement=False" when creating the input 'srcs' list.

        mapper = afwTable.SchemaMapper(srcs.getSchema())
        # map all the existing fields
        mapper.addMinimalSchema(srcs.getSchema(), True)
        schema = mapper.getOutputSchema()
        self.algMetadata = dafBase.PropertyList()
        if self.config.doDeblend:
            self.makeSubtask("deblend", schema=schema)
        if self.config.doMeasurement:
            self.makeSubtask("measurement",
                             schema=schema,
                             algMetadata=self.algMetadata)
        self.schema = schema

        parents = []
        for src in srcs:
            if src.getParent() == 0:
                parents.append(src)

        outsources = afwTable.SourceCatalog(schema)
        outsources.reserve(len(parents))
        outsources.extend(parents, mapper=mapper)
        srcs = outsources
        print(len(srcs), 'sources before deblending')

        if self.config.doDeblend:
            self.deblend.run(calexp, srcs)

        if self.config.doMeasurement:
            self.measurement.run(calexp, srcs)

        if srcs is not None and self.config.doWriteSources:
            sourceWriteFlags = (0
                                if self.config.doWriteHeavyFootprintsInSources
                                else afwTable.SOURCE_IO_NO_HEAVY_FOOTPRINTS)
            print('Writing "src" outputs')
            if self.config.sourceOutputFile:
                srcs.writeFits(self.config.sourceOutputFile,
                               flags=sourceWriteFlags)
            else:
                dataRef.put(srcs, 'src', flags=sourceWriteFlags)
Example #23
def rotateIcSrc(icSrc, theta):
    """!Return a new icSrc catalog with columns added to hold rotated focal
    plane coordinates as if measured under a rotation of the focal plane by
    theta.

    @param icSrc   Input ic SourceCatalog
    @param theta   afwGeom.Angle specifying the rotation to apply
    @returns  A new sourceCatalog
    """
    schema = icSrc.schema
    schemaMapper = afwTable.SchemaMapper(schema, schema)
    for key, field in schema:
        schemaMapper.addMapping(key, field.getName())
    newSchema = schemaMapper.editOutputSchema()

    # Make keys for old columns and new columns (both in the new schema)
    fpKey = afwTable.ArrayDKey([
        schema.find("base_FPPosition_x").key,
        schema.find("base_FPPosition_y").key
    ])
    newFpKeyList = [
        newSchema.addField("base_FPPosition_rot_x", type=np.float64),
        newSchema.addField("base_FPPosition_rot_y", type=np.float64)
    ]
    newFpKey = afwTable.ArrayDKey(newFpKeyList)

    # Copy unrotated columns
    newIcSrc = afwTable.SourceCatalog(newSchema)
    newIcSrc.reserve(len(icSrc))
    for record in icSrc:
        newIcSrc.addNew().assign(record, schemaMapper)

    # Collect items to be transformed
    fps = np.zeros((len(icSrc), 2), dtype=np.float64)
    for i, r in enumerate(icSrc):
        fps[i] = r.get(fpKey)

    # Assemble transformation matrix
    rotZ = zernikeRotMatrix(3, theta)
    rot2 = rotZ[1:3, 1:3]

    # Do the transformation
    newFps = np.dot(rot2, fps.T).T

    # Write into new columns
    for i, r in enumerate(newIcSrc):
        r.set(newFpKey, newFps[i].astype(np.float64))

    return newIcSrc
Example #24
    def _formatCatalog(self, fgcmStarCat, offsets):
        """
        Turn an FGCM-formatted star catalog into a SimpleCatalog suitable for
        use as a reference catalog, applying zeropoint offsets.

        Parameters
        ----------
        fgcmStarCat: `afwTable.SimpleCatalog`
           SimpleCatalog as output by fgcmcal
        offsets: `list` with len(self.bands) entries
           Zeropoint offsets to apply

        Returns
        -------
        formattedCat: `afwTable.SimpleCatalog`
           SimpleCatalog suitable for using as a reference catalog
        """

        sourceMapper = afwTable.SchemaMapper(fgcmStarCat.schema)
        minSchema = LoadIndexedReferenceObjectsTask.makeMinimalSchema(
            self.bands, addCentroid=False, addIsResolved=True, coordErrDim=0)
        sourceMapper.addMinimalSchema(minSchema)
        for band in self.bands:
            sourceMapper.editOutputSchema().addField('%s_nGood' % (band),
                                                     type=np.int32)

        formattedCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
        formattedCat.reserve(len(fgcmStarCat))
        formattedCat.extend(fgcmStarCat, mapper=sourceMapper)

        # Note that we don't have to set `resolved` because the default is False

        for b, band in enumerate(self.bands):
            mag = fgcmStarCat['mag_std_noabs'][:, b] + offsets[b]
            # We want fluxes in Jy from calibrated AB magnitudes
            # (after applying offset)
            # TODO: Full implementation of RFC-549 will have all reference
            # catalogs in nJy instead of Jy.
            flux = afwImage.fluxFromABMag(mag)
            fluxErr = afwImage.fluxErrFromABMagErr(
                fgcmStarCat['magErr_std'][:, b], mag)
            formattedCat['%s_flux' % (band)][:] = flux
            formattedCat['%s_fluxErr' % (band)][:] = fluxErr
            formattedCat['%s_nGood' % (band)][:] = fgcmStarCat['ngood'][:, b]

        return formattedCat
Example #25
    def testFlags(self):
        """test that all the calib_photometry flags are set to reasonable values"""
        schema = self.srcCat.schema
        task = PhotoCalTask(self.refObjLoader, config=self.config, schema=schema)
        mapper = afwTable.SchemaMapper(self.srcCat.schema, schema)
        cat = afwTable.SourceCatalog(schema)
        for name in self.srcCat.schema.getNames():
            mapper.addMapping(self.srcCat.schema.find(name).key)
        cat.extend(self.srcCat, mapper=mapper)

        # test that by default, no stars are reserved and all used are candidates
        task.run(exposure=self.exposure, sourceCat=cat)
        used = 0
        for source in cat:
            if source.get("calib_photometry_used"):
                used += 1
            self.assertFalse(source.get("calib_photometry_reserved"))
        # test that some are actually used
        self.assertGreater(used, 0)
Example #26
    def testFlags(self):
        """test that all the calib_photometry flags are set to reasonable values"""
        schema = self.srcCat.schema
        task = PhotoCalTask(self.refObjLoader,
                            config=self.config,
                            schema=schema)
        mapper = afwTable.SchemaMapper(self.srcCat.schema, schema)
        cat = afwTable.SourceCatalog(schema)
        for name in self.srcCat.schema.getNames():
            mapper.addMapping(self.srcCat.schema.find(name).key)
        cat.extend(self.srcCat, mapper=mapper)

        #   test that by default, no stars are reserved and used < candidates
        task.run(exposure=self.exposure, sourceCat=cat)
        candidates = 0
        used = 0
        reserved = 0
        for source in cat:
            if source.get("calib_photometryCandidate"):
                candidates += 1
            if source.get("calib_photometryUsed"):
                used += 1
            if source.get("calib_photometryReserved"):
                reserved += 1
        self.assertLessEqual(used, candidates)
        self.assertEqual(reserved, 0)

        #   set the reserve fraction, and see if the right proportion are reserved.
        self.config.reserveFraction = .3
        task.run(exposure=self.exposure, sourceCat=cat)
        candidates = 0
        reserved = 0
        used = 0
        for source in cat:
            if source.get("calib_photometryCandidate"):
                candidates += 1
            if source.get("calib_photometryUsed"):
                used += 1
            if source.get("calib_photometryReserved"):
                reserved += 1
        self.assertEqual(reserved, int(.3 * candidates))
        self.assertLessEqual(used, (candidates - reserved))
Example #27
    def _formatCatalog(self, fgcmStarCat, offsets):
        """
        Turn an FGCM-formatted star catalog into a SimpleCatalog suitable for
        use as a ref_cat, applying zeropoint offsets.

        Parameters
        ----------
        fgcmStarCat: SimpleCatalog
           SimpleCatalog as output by fgcmcal
        offsets: list with len(self.bands) entries
           Zeropoint offsets to apply

        Returns
        -------
        formattedCat: SimpleCatalog
           SimpleCatalog suitable for use as a ref_cat
        """

        sourceMapper = afwTable.SchemaMapper(fgcmStarCat.schema)
        sourceMapper.addMinimalSchema(afwTable.SimpleTable.makeMinimalSchema())
        for band in self.bands:
            sourceMapper.editOutputSchema().addField('%s_flux' % (band),
                                                     type=np.float64)
            sourceMapper.editOutputSchema().addField('%s_fluxErr' % (band),
                                                     type=np.float64)
            sourceMapper.editOutputSchema().addField('%s_nGood' % (band),
                                                     type=np.float64)

        formattedCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
        formattedCat.reserve(len(fgcmStarCat))
        formattedCat.extend(fgcmStarCat, mapper=sourceMapper)

        for b, band in enumerate(self.bands):
            mag = fgcmStarCat['mag_std_noabs'][:, b] + offsets[b]
            flux = afwImage.fluxFromABMag(mag)
            fluxErr = afwImage.fluxErrFromABMagErr(
                fgcmStarCat['magerr_std'][:, b], mag)
            formattedCat['%s_flux' % (band)][:] = flux
            formattedCat['%s_fluxErr' % (band)][:] = fluxErr
            formattedCat['%s_nGood' % (band)][:] = fgcmStarCat['ngood'][:, b]

        return formattedCat
Example #28
    def _makeSourceMapper(self, sourceSchema):
        """
        Make a schema mapper for fgcm sources

        Parameters
        ----------
        sourceSchema: `afwTable.Schema`
           Default source schema from the butler

        Returns
        -------
        sourceMapper: `afwTable.SchemaMapper`
           Mapper to the FGCM source schema
        """

        # create a mapper to the preferred output
        sourceMapper = afwTable.SchemaMapper(sourceSchema)

        # map to ra/dec
        sourceMapper.addMapping(sourceSchema['coord_ra'].asKey(), 'ra')
        sourceMapper.addMapping(sourceSchema['coord_dec'].asKey(), 'dec')
        sourceMapper.addMapping(sourceSchema[self.config.jacobianName].asKey(),
                                'jacobian')
        sourceMapper.addMapping(sourceSchema['slot_Centroid_x'].asKey(), 'x')
        sourceMapper.addMapping(sourceSchema['slot_Centroid_y'].asKey(), 'y')

        # and add the fields we want
        sourceMapper.editOutputSchema().addField("visit",
                                                 type=np.int32,
                                                 doc="Visit number")
        sourceMapper.editOutputSchema().addField("ccd",
                                                 type=np.int32,
                                                 doc="CCD number")
        sourceMapper.editOutputSchema().addField("instMag",
                                                 type=np.float32,
                                                 doc="Instrumental magnitude")
        sourceMapper.editOutputSchema().addField(
            "instMagErr", type=np.float32, doc="Instrumental magnitude error")

        return sourceMapper
Example #29
    def create_source_catalog_from_external_catalog(self,
                                                    dataRef,
                                                    coord_file,
                                                    dataset='src',
                                                    debug=False):
        butler = dataRef.getButler()
        schema = butler.get(dataset + "_schema", immediate=True).schema
        mapper = afwTable.SchemaMapper(schema)
        mapper.addMinimalSchema(schema)
        newSchema = mapper.getOutputSchema()

        info = load_external_catalog_info(coord_file)

        src_cat = afwTable.SourceCatalog(newSchema)
        for row in info:
            record = src_cat.addNew()
            record.set('coord_ra', Angle(row['RA'] * degrees))
            record.set('coord_dec', Angle(row['Dec'] * degrees))

        if debug:
            print(src_cat['coord_ra'], src_cat['coord_dec'])
        return src_cat
Example #30
    def __init__(self, config: pexConfig.Config, initInput: Mapping, *args,
                 **kwargs):
        super().__init__(config=config, *args, **kwargs)
        self.apRad = self.config.apRad
        inputSchema = initInput["inputSchema"].schema

        # Create a schema mapper to make a copy of the input schema
        self.mapper = afwTable.SchemaMapper(inputSchema)
        self.mapper.addMinimalSchema(inputSchema, True)

        # Add the new field
        self.apKey = self.mapper.editOutputSchema().addField(
            "apFlux", type=np.float64, doc="Ap flux measured")

        # Get the output schema
        self.schema = self.mapper.getOutputSchema()

        # create the catalog in which new measurements will be stored
        self.outputCatalog = afwTable.SourceCatalog(self.schema)

        # Put the outputSchema into a SourceCatalog container. This variable
        # name matches an initOutput, so it will be persisted.
        self.outputSchema = afwTable.SourceCatalog(self.schema)
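For context, initInputs/outputSchema pairs like the ones in Examples #6 and #30 are wired up through a Gen3 PipelineTask connections class. A hedged sketch of what such a declaration might look like; the class name, dataset names, and dimensions are illustrative, not taken from the example:

import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT

class ApFluxConnections(pipeBase.PipelineTaskConnections,
                        dimensions=("visit", "detector")):
    # Schema consumed at construction time via initInputs["inputSchema"]
    inputSchema = cT.InitInput(
        doc="Schema of the input source catalog",
        name="src_schema",
        storageClass="SourceCatalog",
    )
    # Empty SourceCatalog carrying the output schema, persisted at init
    outputSchema = cT.InitOutput(
        doc="Schema of the measurement catalog produced by this task",
        name="apFlux_schema",
        storageClass="SourceCatalog",
    )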