Code example #1
    @classmethod
    def read_fits(cls, filename):
        """Build an instance of this class from a file.

        Parameters
        ----------
        filename : `str`
            Name of the file to read.
        """
        # Extract info from metadata.
        global_metadata = afwFits.readMetadata(filename, hdu=0)
        has_default = global_metadata.getBool("HAS_DEFAULT")
        if global_metadata.getBool("HAS_REGIONS"):
            focal_plane_region_names = global_metadata.getArray("REGION_NAMES")
        else:
            focal_plane_region_names = []
        f = afwFits.Fits(filename, "r")
        n_extensions = f.countHdus()
        f.closeFile()
        extended_psf_parts = {}
        for j in range(1, n_extensions):
            md = afwFits.readMetadata(filename, hdu=j)
            if has_default and md["REGION"] == "DEFAULT":
                if md["EXTNAME"] == "IMAGE":
                    default_image = afwImage.ImageF(filename, hdu=j)
                elif md["EXTNAME"] == "MASK":
                    default_mask = afwImage.MaskX(filename, hdu=j)
                continue
            if md["EXTNAME"] == "IMAGE":
                extended_psf_part = afwImage.ImageF(filename, hdu=j)
            elif md["EXTNAME"] == "MASK":
                extended_psf_part = afwImage.MaskX(filename, hdu=j)
            extended_psf_parts.setdefault(
                md["REGION"], {})[md["EXTNAME"].lower()] = extended_psf_part
        # Handle default if present.
        if has_default:
            extended_psf = cls(
                afwImage.MaskedImageF(default_image, default_mask))
        else:
            extended_psf = cls()
        # Ensure we recovered an extended PSF for all focal plane regions.
        if len(extended_psf_parts) != len(focal_plane_region_names):
            raise ValueError(
                f"Number of per-region extended PSFs read ({len(extended_psf_parts)}) does not "
                "match with the number of regions recorded in the metadata "
                f"({len(focal_plane_region_names)}).")
        # Generate extended PSF regions mappings.
        for r_name in focal_plane_region_names:
            extended_psf_image = afwImage.MaskedImageF(
                **extended_psf_parts[r_name])
            detector_list = global_metadata.getArray(r_name)
            extended_psf.add_regional_extended_psf(extended_psf_image, r_name,
                                                   detector_list)
        # Return the fully populated extended PSF.
        return extended_psf
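A minimal call-site sketch (hedged: the ExtendedPsf class name and the file name are assumptions; any class exposing this classmethod plus the matching writer would do):

# Hypothetical usage: rebuild the object from a file written by the
# matching write method.
extended_psf = ExtendedPsf.read_fits("extended_psf.fits")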
Code example #2
import numpy as np

import lsst.afw.detection as afwDet
import lsst.afw.image as afwImage
from lsst.afw.geom import SpanSet
from lsst.geom import Point2I


def morphToHeavy(source, peakSchema, xy0=Point2I()):
    """Convert the morphology to a `HeavyFootprint`

    Parameters
    ----------
    source : `scarlet.Component`
        The scarlet source with a morphology to convert to
        a `HeavyFootprint`.
    peakSchema : `lsst.afw.table.Schema`
        The schema for the `PeakCatalog` of the `HeavyFootprint`.
    xy0 : `lsst.geom.Point2I`
        The ``(x, y)`` origin of the bounding box containing the
        `HeavyFootprint`.

    Returns
    -------
    heavy : `lsst.afw.detection.HeavyFootprint` or `None`
        The heavy footprint containing the morphology, or `None` if the
        morphology has no pixels above zero.
    """
    mask = afwImage.MaskX(np.array(source.morph > 0, dtype=np.int32), xy0=xy0)
    ss = SpanSet.fromMask(mask)

    if len(ss) == 0:
        return None

    tfoot = afwDet.Footprint(ss, peakSchema=peakSchema)
    cy, cx = source.pixel_center
    xmin, ymin = xy0
    # HeavyFootprints are not defined for 64 bit floats
    morph = source.morph.astype(np.float32)
    peakFlux = morph[cy, cx]
    tfoot.addPeak(cx + xmin, cy + ymin, peakFlux)
    timg = afwImage.ImageF(morph, xy0=xy0)
    timg = timg[tfoot.getBBox()]
    heavy = afwDet.makeHeavyFootprint(tfoot, afwImage.MaskedImageF(timg))
    return heavy
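A hedged usage sketch for morphToHeavy; DummySource is a hypothetical stand-in for a scarlet.Component that provides only the two attributes the function reads:

class DummySource:
    # Stand-in for scarlet.Component: only .morph and .pixel_center are used.
    def __init__(self, morph, pixel_center):
        self.morph = morph                # 2-d morphology array
        self.pixel_center = pixel_center  # (y, x) of the peak

morph = np.zeros((15, 15), dtype=np.float32)
morph[5:10, 5:10] = 1.0
source = DummySource(morph, pixel_center=(7, 7))
peakSchema = afwDet.PeakTable.makeMinimalSchema()
heavy = morphToHeavy(source, peakSchema)  # returns None for an empty morphology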
Code example #3
File: test_exposure.py  Project: frossie-shadow/afw
    def testProperties(self):
        self.assertMaskedImagesEqual(self.exposureMiOnly.maskedImage,
                                     self.exposureMiOnly.getMaskedImage())
        mi2 = afwImage.MaskedImageF(self.exposureMiOnly.getDimensions())
        mi2.image.array[:] = 5.0
        mi2.variance.array[:] = 3.0
        mi2.mask.array[:] = 0x1
        self.exposureMiOnly.maskedImage = mi2
        self.assertMaskedImagesEqual(self.exposureMiOnly.maskedImage, mi2)
        self.assertImagesEqual(self.exposureMiOnly.image,
                               self.exposureMiOnly.maskedImage.image)

        image3 = afwImage.ImageF(self.exposureMiOnly.getDimensions())
        image3.array[:] = 3.0
        self.exposureMiOnly.image = image3
        self.assertImagesEqual(self.exposureMiOnly.image, image3)

        mask3 = afwImage.MaskX(self.exposureMiOnly.getDimensions())
        mask3.array[:] = 0x2
        self.exposureMiOnly.mask = mask3
        self.assertMasksEqual(self.exposureMiOnly.mask, mask3)

        var3 = afwImage.ImageF(self.exposureMiOnly.getDimensions())
        var3.array[:] = 2.0
        self.exposureMiOnly.variance = var3
        self.assertImagesEqual(self.exposureMiOnly.variance, var3)
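The same property round-trip works on a freshly constructed exposure; a minimal sketch:

import lsst.afw.image as afwImage

exposure = afwImage.ExposureF(10, 10)
exposure.image.array[:] = 1.0      # property access, equivalent to getImage()
exposure.mask.array[:] = 0x0
exposure.variance.array[:] = 1.0
assert (exposure.maskedImage.image.array == 1.0).all()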
Code example #4
    def _isMasked(self, footprint, mExposure):
        """Returns whether the footprint violates the mask limits"""
        bbox = footprint.getBBox()
        mask = np.bitwise_or.reduce(mExposure.mask[:, bbox].array, axis=0)
        size = float(footprint.getArea())
        for maskName, limit in self.config.maskLimits.items():
            maskVal = mExposure.mask.getPlaneBitMask(maskName)
            _mask = afwImage.MaskX(mask & maskVal, xy0=bbox.getMin())
            # SpanSet of unmasked pixels within the footprint
            unmaskedSpan = footprint.spans.intersectNot(_mask)
            if (size - unmaskedSpan.getArea()) / size > limit:
                return True
        return False
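The per-plane test above reduces to a masked-pixel fraction; a pure-NumPy sketch of that core check (the footprint-span intersection is omitted here for brevity):

import numpy as np

def masked_fraction(mask_array, bitmask):
    # Fraction of pixels with any of the given bits set.
    return np.count_nonzero(mask_array & bitmask) / mask_array.size

mask_array = np.zeros((10, 10), dtype=np.int32)
mask_array[:2, :] = 0x1                        # flag 20% of the pixels
assert masked_fraction(mask_array, 0x1) > 0.1  # would trip a 0.1 limit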
Code example #5
    def setUp(self):
        self.dataDir = os.path.join(os.path.split(__file__)[0], "data")

        # The values below must match what was written by the code in
        # `afw/tests/data/makeTestExposure.py`.
        nx = ny = 10
        image = afwImage.ImageF(np.arange(nx*ny, dtype='f').reshape(nx, ny))
        variance = afwImage.ImageF(np.ones((nx, ny), dtype='f'))
        mask = afwImage.MaskX(nx, ny)
        mask.array[5, 5] = 5
        self.maskedImage = afwImage.MaskedImageF(image, mask, variance)

        self.v0PhotoCalib = afwImage.makePhotoCalibFromCalibZeroPoint(1e6, 2e4)
        self.v1PhotoCalib = afwImage.PhotoCalib(1e6, 2e4)
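Either calibration object can then convert instrumental fluxes; a short sketch of the PhotoCalib API exercised by such tests (the values mirror the setUp above):

import lsst.afw.image as afwImage

photoCalib = afwImage.PhotoCalib(1e6, 2e4)   # calibration mean and error
mag = photoCalib.instFluxToMagnitude(1e5)    # instrumental flux -> AB magnitude
flux = photoCalib.instFluxToNanojansky(1e5)  # instrumental flux -> nJy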
Code example #6
    def deblend(self, mExposure, sources):
        """Deblend a data cube of multiband images

        Parameters
        ----------
        mExposure : `MultibandExposure`
            The exposures should be co-added images of the same
            shape and region of the sky.
        sources : `SourceCatalog`
            The merged `SourceCatalog` that contains parent footprints
            to (potentially) deblend.

        Returns
        -------
        fluxCatalogs : dict or None
            Keys are the names of the filters and the values are
            `lsst.afw.table.SourceCatalog`s.
            These are the flux-conserved catalogs with heavy footprints,
            with the image data weighted by the multiband templates.
            If `self.config.conserveFlux` is `False`, then this item will be
            `None`.
        templateCatalogs : dict or None
            Keys are the names of the filters and the values are
            `lsst.afw.table.SourceCatalog`s.
            These are catalogs with heavy footprints built from the
            multiband templates.
            If `self.config.saveTemplates` is `False`, then this item will be
            `None`.
        """
        import time

        filters = mExposure.filters
        self.log.info("Deblending {0} sources in {1} exposure bands".format(
            len(sources), len(mExposure)))

        # Create the output catalogs
        templateCatalogs = {}
        # fluxCatalogs must be returned but is not calculated yet; set it to
        # None to stay consistent with the doc string.
        fluxCatalogs = None
        for f in filters:
            _catalog = afwTable.SourceCatalog(sources.table.clone())
            _catalog.extend(sources)
            templateCatalogs[f] = _catalog

        n0 = len(sources)
        nparents = 0
        for pk, src in enumerate(sources):
            foot = src.getFootprint()
            bbox = foot.getBBox()
            self.log.info("id: {0}".format(src["id"]))
            peaks = foot.getPeaks()

            # Since we use the first peak for the parent object, we should
            # propagate its flags to the parent source.
            src.assign(peaks[0], self.peakSchemaMapper)

            # Block of Skipping conditions
            if len(peaks) < 2 and not self.config.processSingles:
                for f in filters:
                    templateCatalogs[f][pk].set(self.runtimeKey, 0)
                continue
            if self._isLargeFootprint(foot):
                src.set(self.tooBigKey, True)
                self._skipParent(src, mExposure.mask)
                self.log.trace('Parent %i: skipping large footprint',
                               int(src.getId()))
                continue
            if self._isMasked(foot, mExposure):
                src.set(self.maskedKey, True)
                mask = np.bitwise_or.reduce(mExposure.mask[:, bbox].array,
                                            axis=0)
                mask = afwImage.MaskX(mask, xy0=bbox.getMin())
                self._skipParent(src, mask)
                self.log.trace('Parent %i: skipping masked footprint',
                               int(src.getId()))
                continue
            if len(peaks) > self.config.maxNumberOfPeaks:
                src.set(self.tooManyPeaksKey, True)
                msg = 'Parent {0}: Too many peaks, using the first {1} peaks'
                self.log.trace(
                    msg.format(int(src.getId()), self.config.maxNumberOfPeaks))

            nparents += 1
            self.log.trace('Parent %i: deblending %i peaks', int(src.getId()),
                           len(peaks))
            # Run the deblender
            try:
                t0 = time.time()
                # Build the parameter lists with the same ordering
                blend, skipped = deblend(mExposure, foot, self.config)
                tf = time.time()
                runtime = (tf - t0) * 1000
                src.set(self.deblendFailedKey, False)
                src.set(self.runtimeKey, runtime)
                converged = checkBlendConvergence(blend,
                                                  self.config.relativeError)
                # The key records a *failure* to converge, hence the negation
                src.set(self.blendConvergenceFailedFlagKey, not converged)
                sources = [src for src in blend.sources]
                # Re-insert place holders for skipped sources
                # to propagate them in the catalog so
                # that the peaks stay consistent
                for k in skipped:
                    sources.insert(k, None)
            except Exception as e:
                if self.config.catchFailures:
                    self.log.warn("Unable to deblend source %d: %s" %
                                  (src.getId(), e))
                    src.set(self.deblendFailedKey, True)
                    src.set(self.runtimeKey, 0)
                    import traceback
                    traceback.print_exc()
                    continue
                else:
                    raise

            # Add the merged source as a parent in the catalog for each band
            templateParents = {}
            parentId = src.getId()
            for f in filters:
                templateParents[f] = templateCatalogs[f][pk]
                templateParents[f].set(self.runtimeKey, runtime)
                templateParents[f].set(self.iterKey, len(blend.loss))

            # Add each source to the catalogs in each band
            templateSpans = {f: afwGeom.SpanSet() for f in filters}
            nchild = 0
            for k, source in enumerate(sources):
                # Skip any sources with no flux or that scarlet skipped because
                # it could not initialize
                if k in skipped:
                    if not self.config.propagateAllPeaks:
                        # Not propagating skipped peaks; drop this one
                        continue
                    # We need to preserve the peak: make sure we have enough
                    # info to create a minimal child src
                    msg = "Peak at {0} failed deblending.  Using minimal default info for child."
                    self.log.trace(msg.format(src.getFootprint().peaks[k]))
                    # copy the full footprint and strip out extra peaks
                    foot = afwDet.Footprint(src.getFootprint())
                    peakList = foot.getPeaks()
                    peakList.clear()
                    peakList.append(src.peaks[k])
                    zeroMimg = afwImage.MaskedImageF(foot.getBBox())
                    heavy = afwDet.makeHeavyFootprint(foot, zeroMimg)
                    models = afwDet.MultibandFootprint(
                        mExposure.filters, [heavy] * len(mExposure.filters))
                else:
                    src.set(self.deblendSkippedKey, False)
                    models = modelToHeavy(source,
                                          filters,
                                          xy0=bbox.getMin(),
                                          observation=blend.observations[0])
                # TODO: We should eventually write the morphology and SED to
                # the catalog
                # morph = source.morphToHeavy(xy0=bbox.getMin())
                # sed = source.sed / source.sed.sum()

                for f in filters:
                    if len(models[f].getPeaks()) != 1:
                        err = "Heavy footprint should have a single peak, got {0}"
                        raise ValueError(err.format(len(models[f].peaks)))
                    cat = templateCatalogs[f]
                    child = self._addChild(parentId,
                                           cat,
                                           models[f],
                                           source,
                                           converged,
                                           xy0=bbox.getMin())
                    if parentId == 0:
                        child.setId(src.getId())
                        child.set(self.runtimeKey, runtime)
                    else:
                        templateSpans[f] = templateSpans[f].union(
                            models[f].getSpans())
                nchild += 1

            # Child footprints may extend beyond the full extent of their
            # parent's footprint, which causes the replace-by-noise code to
            # fail to reinstate those pixels to their original values.  The
            # following updates the parent footprint in place to ensure it
            # contains the full union of itself and all of its
            # children's footprints.
            for f in filters:
                templateParents[f].set(self.nChildKey, nchild)
                templateParents[f].getFootprint().setSpans(templateSpans[f])

        K = len(list(templateCatalogs.values())[0])
        self.log.info(
            'Deblended: of %i sources, %i were deblended, creating %i children, total %i sources'
            % (n0, nparents, K - n0, K))
        return fluxCatalogs, templateCatalogs
Code example #7
    def deblend(self, mExposure, sources):
        """Deblend a data cube of multiband images

        Parameters
        ----------
        mExposure : `MultibandExposure`
            The exposures should be co-added images of the same
            shape and region of the sky.
        sources : `SourceCatalog`
            The merged `SourceCatalog` that contains parent footprints
            to (potentially) deblend.

        Returns
        -------
        templateCatalogs : dict
            Keys are the names of the filters and the values are
            `lsst.afw.table.SourceCatalog`s.
            These are catalogs with heavy footprints built from the
            multiband templates.
        """
        import time

        filters = mExposure.filters
        self.log.info("Deblending {0} sources in {1} exposure bands".format(
            len(sources), len(mExposure)))

        # Create the output catalogs
        templateCatalogs = {}
        for f in filters:
            _catalog = afwTable.SourceCatalog(sources.table.clone())
            _catalog.extend(sources)
            templateCatalogs[f] = _catalog

        n0 = len(sources)
        nparents = 0
        for pk, src in enumerate(sources):
            foot = src.getFootprint()
            bbox = foot.getBBox()
            peaks = foot.getPeaks()

            # Since we use the first peak for the parent object, we should
            # propagate its flags to the parent source.
            src.assign(peaks[0], self.peakSchemaMapper)

            # Block of Skipping conditions
            if len(peaks) < 2 and not self.config.processSingles:
                for f in filters:
                    templateCatalogs[f][pk].set(self.runtimeKey, 0)
                continue
            if self._isLargeFootprint(foot):
                src.set(self.tooBigKey, True)
                self._skipParent(src, mExposure.mask)
                self.log.trace('Parent %i: skipping large footprint',
                               int(src.getId()))
                continue
            if self._isMasked(foot, mExposure):
                src.set(self.maskedKey, True)
                mask = np.bitwise_or.reduce(mExposure.mask[:, bbox].array,
                                            axis=0)
                mask = afwImage.MaskX(mask, xy0=bbox.getMin())
                self._skipParent(src, mask)
                self.log.trace('Parent %i: skipping masked footprint',
                               int(src.getId()))
                continue
            if (self.config.maxNumberOfPeaks > 0
                    and len(peaks) > self.config.maxNumberOfPeaks):
                src.set(self.tooManyPeaksKey, True)
                self._skipParent(src, mExposure.mask)
                msg = 'Parent {0}: Too many peaks, skipping blend'
                self.log.trace(msg.format(int(src.getId())))
                # Unlike meas_deblender, in scarlet we skip the entire blend
                # if the number of peaks exceeds max peaks, since neglecting
                # to model any peaks often results in catastrophic failure
                # of scarlet to generate models for the brighter sources.
                continue

            nparents += 1
            self.log.trace('Parent %i: deblending %i peaks', int(src.getId()),
                           len(peaks))
            # Run the deblender
            blendError = None
            try:
                t0 = time.time()
                # Build the parameter lists with the same ordering
                blend, skipped = deblend(mExposure, foot, self.config)
                tf = time.time()
                runtime = (tf - t0) * 1000
                src.set(self.deblendFailedKey, False)
                src.set(self.runtimeKey, runtime)
                converged = _checkBlendConvergence(blend,
                                                   self.config.relativeError)
                # The key records a *failure* to converge, hence the negation
                src.set(self.blendConvergenceFailedFlagKey, not converged)
                sources = [src for src in blend.sources]
                # Re-insert place holders for skipped sources
                # to propagate them in the catalog so
                # that the peaks stay consistent
                for k in skipped:
                    sources.insert(k, None)
            # Catch all errors and filter out the ones that we know about
            except Exception as e:
                blendError = type(e).__name__
                if isinstance(e, ScarletGradientError):
                    src.set(self.iterKey, e.iterations)
                elif not isinstance(e, IncompleteDataError):
                    blendError = "UnknownError"
                    self._skipParent(src, mExposure.mask)
                    if self.config.catchFailures:
                        # Make it easy to find UnknownErrors in the log file
                        self.log.warn("UnknownError")
                        import traceback
                        traceback.print_exc()
                    else:
                        raise

                self.log.warn("Unable to deblend source %d: %s" %
                              (src.getId(), blendError))
                src.set(self.deblendFailedKey, True)
                src.set(self.deblendErrorKey, blendError)
                self._skipParent(src, mExposure.mask)
                continue

            # Add the merged source as a parent in the catalog for each band
            templateParents = {}
            parentId = src.getId()
            for f in filters:
                templateParents[f] = templateCatalogs[f][pk]
                templateParents[f].set(self.nPeaksKey, len(foot.peaks))
                templateParents[f].set(self.runtimeKey, runtime)
                templateParents[f].set(self.iterKey, len(blend.loss))
                logL = blend.loss[-1] - blend.observations[0].log_norm
                templateParents[f].set(self.scarletLogLKey, logL)

            # Add each source to the catalogs in each band
            nchild = 0
            for k, source in enumerate(sources):
                # Skip any sources with no flux or that scarlet skipped because
                # it could not initialize
                if k in skipped:
                    # No need to propagate anything
                    continue
                src.set(self.deblendSkippedKey, False)
                models = modelToHeavy(source, filters, xy0=bbox.getMin(),
                                      observation=blend.observations[0])

                flux = scarlet.measure.flux(source)
                for fidx, f in enumerate(filters):
                    if len(models[f].getPeaks()) != 1:
                        err = "Heavy footprint should have a single peak, got {0}"
                        raise ValueError(err.format(len(models[f].peaks)))
                    cat = templateCatalogs[f]
                    child = self._addChild(src,
                                           cat,
                                           models[f],
                                           source,
                                           converged,
                                           xy0=bbox.getMin(),
                                           flux=flux[fidx])
                    if parentId == 0:
                        child.setId(src.getId())
                        child.set(self.runtimeKey, runtime)
                nchild += 1

            # Set the number of children for each parent
            for f in filters:
                templateParents[f].set(self.nChildKey, nchild)

        K = len(list(templateCatalogs.values())[0])
        self.log.info(
            'Deblended: of %i sources, %i were deblended, creating %i children, total %i sources'
            % (n0, nparents, K - n0, K))
        return templateCatalogs
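A hedged call-site sketch for this task method (task construction and pipeline wiring elided; deblendTask and mergedSources are hypothetical names):

# mExposure: MultibandExposure of co-added images; mergedSources: the merged
# SourceCatalog of parent footprints, as described in the docstring.
templateCatalogs = deblendTask.deblend(mExposure, mergedSources)
gCatalog = templateCatalogs["g"]  # per-band catalog with heavy footprints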