Example #1
    def testStringToInterpStyle(self):
        for name, desiredStyle in afwMath.Interpolate.Style.__members__.items():
            if name in ("UNKNOWN", "NUM_STYLES"):
                with self.assertRaises(pexExcept.InvalidParameterError):
                    afwMath.stringToInterpStyle(name)
            else:
                style = afwMath.stringToInterpStyle(name)
                self.assertEqual(style, desiredStyle)

        for badName in ("BOGUS", ""):
            with self.assertRaises(pexExcept.InvalidParameterError):
                afwMath.stringToInterpStyle(badName)
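The round trip exercised by this test is the pattern used throughout the examples below: a configuration string is validated by stringToInterpStyle and the resulting style is passed to makeInterpolate. A minimal sketch of that pattern, using hypothetical knot values:

import lsst.afw.math as afwMath

# Hypothetical knots; spline styles need a handful of points.
xKnots = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
yKnots = [0.0, 1.0, 4.0, 9.0, 16.0, 25.0]

style = afwMath.stringToInterpStyle("AKIMA_SPLINE")  # raises InvalidParameterError for bad names
interp = afwMath.makeInterpolate(xKnots, yKnots, style)
value = interp.interpolate(2.5)  # evaluate the interpolant at a single point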
Example #2
    def exposureToBackground(bgExp):
        """Convert an exposure to background model

        Calibs need to be persisted as an Exposure, so we need to convert
        the persisted Exposure to a background model.

        Parameters
        ----------
        bgExp : `lsst.afw.image.Exposure`
            Background model in Exposure format.

        Returns
        -------
        bg : `lsst.afw.math.BackgroundList`
            Background model
        """
        header = bgExp.getMetadata()
        xMin = header.getScalar("BOX.MINX")
        yMin = header.getScalar("BOX.MINY")
        xMax = header.getScalar("BOX.MAXX")
        yMax = header.getScalar("BOX.MAXY")
        algorithm = header.getScalar("ALGORITHM")
        bbox = afwGeom.Box2I(afwGeom.Point2I(xMin, yMin), afwGeom.Point2I(xMax, yMax))
        return afwMath.BackgroundList(
            (afwMath.BackgroundMI(bbox, bgExp.getMaskedImage()),
             afwMath.stringToInterpStyle(algorithm),
             afwMath.stringToUndersampleStyle("REDUCE_INTERP_ORDER"),
             afwMath.ApproximateControl.UNKNOWN,
             0, 0, False))
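For the opposite direction (persisting a background component as an Exposure), the same header keys have to be written so that exposureToBackground() can read them back. A hedged sketch of that writing side, assuming a BackgroundMI named bgMI and an Exposure named bgExp built from its statistics image (both hypothetical here), and that BackgroundMI.getImageBBox() behaves as in lsst.afw.math:

# Hypothetical: bgMI is an afwMath.BackgroundMI, bgExp is the Exposure built
# from its statistics image; write the keys exposureToBackground() reads back.
md = bgExp.getMetadata()
box = bgMI.getImageBBox()  # bounding box the background model was measured over
md.set("BOX.MINX", box.getMinX())
md.set("BOX.MINY", box.getMinY())
md.set("BOX.MAXX", box.getMaxX())
md.set("BOX.MAXY", box.getMaxY())
md.set("ALGORITHM", "NATURAL_SPLINE")  # must be a name stringToInterpStyle() accepts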
Example #3
def interpolateBadPixels(array, isBad, interpolationStyle):
    """Interpolate bad pixels in an image array

    The bad pixels are modified in the array.

    Parameters
    ----------
    array : `numpy.ndarray`
        Image array with bad pixels.
    isBad : `numpy.ndarray` of type `bool`
        Boolean array indicating which pixels are bad.
    interpolationStyle : `str`
        Style for interpolation (see `lsst.afw.math.Background`);
        supported values are CONSTANT, LINEAR, NATURAL_SPLINE,
        AKIMA_SPLINE.
    """
    if numpy.all(isBad):
        raise RuntimeError("No good pixels in image array")
    height, width = array.shape
    xIndices = numpy.arange(width, dtype=float)
    yIndices = numpy.arange(height, dtype=float)
    method = afwMath.stringToInterpStyle(interpolationStyle)
    isGood = ~isBad
    for y in range(height):
        if numpy.any(isBad[y, :]) and numpy.any(isGood[y, :]):
            array[y][isBad[y]] = interpolate1D(method, xIndices[isGood[y]], array[y][isGood[y]],
                                               xIndices[isBad[y]])

    isBad = numpy.isnan(array)
    isGood = ~isBad
    for x in range(width):
        if numpy.any(isBad[:, x]) and numpy.any(isGood[:, x]):
            array[:, x][isBad[:, x]] = interpolate1D(method, yIndices[isGood[:, x]],
                                                     array[:, x][isGood[:, x]], yIndices[isBad[:, x]])
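A minimal usage sketch for the helper above, assuming interpolateBadPixels and its interpolate1D dependency are importable: NaN pixels are flagged as bad and repaired in place with a linear style.

import numpy

# Hypothetical 3x4 image with two NaN pixels.
array = numpy.arange(12, dtype=float).reshape(3, 4)
array[1, 2] = numpy.nan
array[2, 0] = numpy.nan

isBad = numpy.isnan(array)
interpolateBadPixels(array, isBad, "LINEAR")  # the array is modified in place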
Example #4
    def __call__(self, image, **kwargs):
        """Correct for non-linearity.

        Parameters
        ----------
        image : `lsst.afw.image.Image`
            Image to be corrected
        kwargs : `dict`
            Dictionary of parameter keywords:
            ``"coeffs"``
                Coefficient vector (`list` or `numpy.array`).
            ``"log"``
                Logger to handle messages (`lsst.log.Log`).

        Returns
        -------
        output : `tuple` [`bool`, `int`]
            The boolean is `True` if a correction was applied
            successfully.  The integer counts the pixels that were out
            of range and could not be corrected.
        """
        splineCoeff = kwargs['coeffs']
        centers, values = np.split(splineCoeff, 2)
        interp = afwMath.makeInterpolate(centers.tolist(), values.tolist(),
                                         afwMath.stringToInterpStyle("AKIMA_SPLINE"))

        ampArr = image.getArray()
        delta = interp.interpolate(ampArr.flatten())
        ampArr -= np.array(delta).reshape(ampArr.shape)

        return True, 0
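The ``coeffs`` vector is expected to hold the spline centers and values packed end to end, which is why np.split(splineCoeff, 2) recovers them. A brief sketch of assembling such a vector from hypothetical knot data, matching the packing used in Example #14 below:

import numpy as np

# Hypothetical spline knots: mean signal levels and the correction at each level.
centers = np.array([1000.0, 5000.0, 10000.0, 20000.0, 40000.0, 60000.0])
values = np.array([0.0, 1.5, 4.0, 9.0, 20.0, 35.0])

coeffs = np.concatenate((centers, values))  # even length; first half centers, second half values
# The correction would then be applied with a (hypothetical) instance of the class above:
# linearizer(image, coeffs=coeffs, log=None)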
Example #5
    def splineFit(self, indices, collapsed, numBins):
        """Wrapper function to match spline fit API to polynomial fit API.

        Parameters
        ----------
        indices : `numpy.ndarray`
            Locations to evaluate the spline.
        collapsed : `numpy.ndarray`
            Collapsed overscan values corresponding to the spline
            evaluation points.
        numBins : `int`
            Number of bins to use in constructing the spline.

        Returns
        -------
        interp : `lsst.afw.math.Interpolate`
            Interpolation object for later evaluation.
        """
        if not np.ma.is_masked(collapsed):
            collapsed.mask = np.array(len(collapsed) * [np.ma.nomask])

        numPerBin, binEdges = np.histogram(indices,
                                           bins=numBins,
                                           weights=1 -
                                           collapsed.mask.astype(int))
        with np.errstate(invalid="ignore"):
            values = np.histogram(
                indices,
                bins=numBins,
                weights=collapsed.data * ~collapsed.mask)[0] / numPerBin
            binCenters = np.histogram(
                indices, bins=numBins,
                weights=indices * ~collapsed.mask)[0] / numPerBin

            if len(binCenters[numPerBin > 0]) < 5:
                self.log.warn(
                    "Cannot do spline fitting for overscan: %s valid points.",
                    len(binCenters[numPerBin > 0]))
                # Return a scalar value if we have one, otherwise
                # return zero.  This amplifier is hopefully already
                # masked.
                if len(values[numPerBin > 0]) != 0:
                    return float(values[numPerBin > 0][0])
                else:
                    return 0.0

            interp = afwMath.makeInterpolate(
                binCenters.astype(float)[numPerBin > 0],
                values.astype(float)[numPerBin > 0],
                afwMath.stringToInterpStyle(self.config.fitType))
        return interp
Example #6
    def toCcdBackground(self, detector, bbox):
        """Produce a background model for a CCD

        The superpixel background model is warped back to the
        CCD frame, for application to the individual CCD.

        Parameters
        ----------
        detector : `lsst.afw.cameraGeom.Detector`
            CCD for which to produce background model.
        bbox : `lsst.geom.Box2I`
            Bounding box of CCD exposure.

        Returns
        -------
        bg : `lsst.afw.math.BackgroundList`
            Background model for CCD.
        """
        transform = detector.getTransformMap().getTransform(
            detector.makeCameraSys(afwCameraGeom.PIXELS),
            detector.makeCameraSys(afwCameraGeom.FOCAL_PLANE))
        binTransform = (
            geom.AffineTransform.makeScaling(self.config.binning) *
            geom.AffineTransform.makeTranslation(geom.Extent2D(0.5, 0.5)))

        # Binned image on CCD --> unbinned image on CCD --> focal plane --> binned focal plane
        toSample = afwGeom.makeTransform(binTransform).then(transform).then(
            self.transform)

        focalPlane = self.getStatsImage()
        fpNorm = afwImage.ImageF(focalPlane.getBBox())
        fpNorm.set(1.0)

        image = afwImage.ImageF(bbox.getDimensions() // self.config.binning)
        norm = afwImage.ImageF(image.getBBox())
        ctrl = afwMath.WarpingControl("bilinear")
        afwMath.warpImage(image, focalPlane, toSample.inverted(), ctrl)
        afwMath.warpImage(norm, fpNorm, toSample.inverted(), ctrl)
        image /= norm

        mask = afwImage.Mask(image.getBBox())
        isBad = numpy.isnan(image.getArray())
        mask.getArray()[isBad] = mask.getPlaneBitMask("BAD")
        image.getArray()[isBad] = image.getArray()[~isBad].mean()

        return afwMath.BackgroundList(
            (afwMath.BackgroundMI(bbox, afwImage.makeMaskedImage(image, mask)),
             afwMath.stringToInterpStyle(self.config.interpolation),
             afwMath.stringToUndersampleStyle("REDUCE_INTERP_ORDER"),
             afwMath.ApproximateControl.UNKNOWN, 0, 0, False))
Example #7
    def measureBackground(self, image):
        """Measure a background model for image

        This doesn't use a full-featured background model (e.g., no Chebyshev
        approximation) because we just want the binning behaviour.  This will
        allow us to average the bins later (`averageBackgrounds`).

        The `BackgroundMI` is wrapped in a `BackgroundList` so it can be
        pickled and persisted.

        Parameters
        ----------
        image : `lsst.afw.image.MaskedImage`
            Image for which to measure background.

        Returns
        -------
        bgModel : `lsst.afw.math.BackgroundList`
            Background model.
        """
        stats = afwMath.StatisticsControl()
        stats.setAndMask(image.getMask().getPlaneBitMask(self.config.background.mask))
        stats.setNanSafe(True)
        ctrl = afwMath.BackgroundControl(
            self.config.background.algorithm,
            max(int(image.getWidth()/self.config.background.xBinSize + 0.5), 1),
            max(int(image.getHeight()/self.config.background.yBinSize + 0.5), 1),
            "REDUCE_INTERP_ORDER",
            stats,
            self.config.background.statistic
        )

        bg = afwMath.makeBackground(image, ctrl)

        return afwMath.BackgroundList((
            bg,
            afwMath.stringToInterpStyle(self.config.background.algorithm),
            afwMath.stringToUndersampleStyle("REDUCE_INTERP_ORDER"),
            afwMath.ApproximateControl.UNKNOWN,
            0, 0, False
        ))
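Once measured, the model is applied by evaluating the BackgroundList back into an image and subtracting it. A short sketch, assuming a task instance named task with the method above, an exposure to correct, and that BackgroundList.getImage() is available (it is in current lsst.afw.math, but treat it as an assumption here):

mi = exposure.getMaskedImage()
bgModel = task.measureBackground(mi)  # BackgroundList from the method above
mi -= bgModel.getImage()              # evaluate the model and subtract in place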
Example #8
    def __init__(self, interpStyle, xList, yList, scaleList):
        """Construct an SdssImageScaler

        @warning: scaleErrList is presently not used

        @param[in] interpStyle: interpolation style (see lsst.afw.math.Interpolate for options)
        @param[in] xList: list of X pixel positions
        @param[in] yList: list of Y pixel positions
        @param[in] scaleList: list of multiplicative scales at (x,y)

        @raise RuntimeError if the lists have different lengths
        """
        if len(xList) != len(yList) or len(xList) != len(scaleList):
            raise RuntimeError(
                "len(xList)=%s len(yList)=%s, len(scaleList)=%s but all lists must have the same length"
                % (len(xList), len(yList), len(scaleList)))

        self.interpStyle = afwMath.stringToInterpStyle(interpStyle)
        self._xList = xList
        self._yList = yList
        self._scaleList = scaleList
Example #9
    def splineFit(self, indices, collapsed, numBins):
        """Wrapper function to match spline fit API to polynomial fit API.

        Parameters
        ----------
        indices : `numpy.ndarray`
            Locations to evaluate the spline.
        collapsed : `numpy.ndarray`
            Collapsed overscan values corresponding to the spline
            evaluation points.
        numBins : `int`
            Number of bins to use in constructing the spline.

        Returns
        -------
        interp : `lsst.afw.math.Interpolate`
            Interpolation object for later evaluation.
        """
        if not np.ma.is_masked(collapsed):
            collapsed.mask = np.array(len(collapsed) * [np.ma.nomask])

        numPerBin, binEdges = np.histogram(indices,
                                           bins=numBins,
                                           weights=1 -
                                           collapsed.mask.astype(int))
        with np.errstate(invalid="ignore"):
            values = np.histogram(
                indices,
                bins=numBins,
                weights=collapsed.data * ~collapsed.mask)[0] / numPerBin
            binCenters = np.histogram(
                indices, bins=numBins,
                weights=indices * ~collapsed.mask)[0] / numPerBin
            interp = afwMath.makeInterpolate(
                binCenters.astype(float)[numPerBin > 0],
                values.astype(float)[numPerBin > 0],
                afwMath.stringToInterpStyle(self.config.fitType))
        return interp
Example #10
def overscanCorrection(ampMaskedImage, overscanImage, fitType='MEDIAN', order=1, collapseRej=3.0,
                       statControl=None, overscanIsInt=True):
    """Apply overscan correction in place.

    Parameters
    ----------
    ampMaskedImage : `lsst.afw.image.MaskedImage`
        Image of amplifier to correct; modified.
    overscanImage : `lsst.afw.image.Image` or `lsst.afw.image.MaskedImage`
        Image of overscan; modified.
    fitType : `str`
        Type of fit for overscan correction. May be one of:

        - ``MEAN``: use mean of overscan.
        - ``MEANCLIP``: use clipped mean of overscan.
        - ``MEDIAN``: use median of overscan.
        - ``POLY``: fit with ordinary polynomial.
        - ``CHEB``: fit with Chebyshev polynomial.
        - ``LEG``: fit with Legendre polynomial.
        - ``NATURAL_SPLINE``: fit with natural spline.
        - ``CUBIC_SPLINE``: fit with cubic spline.
        - ``AKIMA_SPLINE``: fit with Akima spline.

    order : `int`
        Polynomial order or number of spline knots; ignored unless
        ``fitType`` indicates a polynomial or spline.
    statControl : `lsst.afw.math.StatisticsControl`
        Statistics control object.  In particular, we pay attention to ``numSigmaClip``.
    overscanIsInt : `bool`
        Treat the overscan region as consisting of integers, even if it's been
        converted to float.  E.g. handle ties properly.

    Returns
    -------
    result : `lsst.pipe.base.Struct`
        Result struct with components:

        - ``imageFit``: Value(s) removed from image (scalar or
            `lsst.afw.image.Image`)
        - ``overscanFit``: Value(s) removed from overscan (scalar or
            `lsst.afw.image.Image`)
        - ``overscanImage``: Overscan corrected overscan region
            (`lsst.afw.image.Image`)

    Raises
    ------
    pexExcept.Exception
        Raised if ``fitType`` is not an allowed value.

    Notes
    -----
    The ``ampMaskedImage`` and ``overscanImage`` are modified, with the fit
    subtracted. Note that the ``overscanImage`` should not be a subimage of
    the ``ampMaskedImage``, to avoid being subtracted twice.

    Debug plots are available for the SPLINE fitTypes by setting the
    `debug.display` for `name` == "lsst.ip.isr.isrFunctions".  These
    plots show the scatter plot of the overscan data (collapsed along
    the perpendicular dimension) as a function of position on the CCD
    (normalized between +/-1).
    """
    ampImage = ampMaskedImage.getImage()
    if statControl is None:
        statControl = afwMath.StatisticsControl()

    numSigmaClip = statControl.getNumSigmaClip()

    if fitType in ('MEAN', 'MEANCLIP'):
        fitType = afwMath.stringToStatisticsProperty(fitType)
        offImage = afwMath.makeStatistics(overscanImage, fitType, statControl).getValue()
        overscanFit = offImage
    elif fitType in ('MEDIAN',):
        if overscanIsInt:
            # we need an image with integer pixels to handle ties properly
            if hasattr(overscanImage, "image"):
                imageI = overscanImage.image.convertI()
                overscanImageI = afwImage.MaskedImageI(imageI, overscanImage.mask, overscanImage.variance)
            else:
                overscanImageI = overscanImage.convertI()
        else:
            overscanImageI = overscanImage

        fitType = afwMath.stringToStatisticsProperty(fitType)
        offImage = afwMath.makeStatistics(overscanImageI, fitType, statControl).getValue()
        overscanFit = offImage

        if overscanIsInt:
            del overscanImageI
    elif fitType in ('POLY', 'CHEB', 'LEG', 'NATURAL_SPLINE', 'CUBIC_SPLINE', 'AKIMA_SPLINE'):
        if hasattr(overscanImage, "getImage"):
            biasArray = overscanImage.getImage().getArray()
            biasArray = numpy.ma.masked_where(overscanImage.getMask().getArray() & statControl.getAndMask(),
                                              biasArray)
        else:
            biasArray = overscanImage.getArray()
        # Fit along the long axis, so collapse along each short row and fit the resulting array
        shortInd = numpy.argmin(biasArray.shape)
        if shortInd == 0:
            # Convert to some 'standard' representation to make things easier
            biasArray = numpy.transpose(biasArray)

        # Do a single round of clipping to weed out CR hits and signal leaking into the overscan
        percentiles = numpy.percentile(biasArray, [25.0, 50.0, 75.0], axis=1)
        medianBiasArr = percentiles[1]
        stdevBiasArr = 0.74*(percentiles[2] - percentiles[0])  # robust stdev
        diff = numpy.abs(biasArray - medianBiasArr[:, numpy.newaxis])
        biasMaskedArr = numpy.ma.masked_where(diff > numSigmaClip*stdevBiasArr[:, numpy.newaxis], biasArray)
        collapsed = numpy.mean(biasMaskedArr, axis=1)
        if collapsed.mask.sum() > 0:
            collapsed.data[collapsed.mask] = numpy.mean(biasArray.data[collapsed.mask], axis=1)
        del biasArray, percentiles, stdevBiasArr, diff, biasMaskedArr

        if shortInd == 0:
            collapsed = numpy.transpose(collapsed)

        num = len(collapsed)
        indices = 2.0*numpy.arange(num)/float(num) - 1.0

        if fitType in ('POLY', 'CHEB', 'LEG'):
            # A numpy polynomial
            poly = numpy.polynomial
            fitter, evaler = {"POLY": (poly.polynomial.polyfit, poly.polynomial.polyval),
                              "CHEB": (poly.chebyshev.chebfit, poly.chebyshev.chebval),
                              "LEG": (poly.legendre.legfit, poly.legendre.legval),
                              }[fitType]

            coeffs = fitter(indices, collapsed, order)
            fitBiasArr = evaler(indices, coeffs)
        elif 'SPLINE' in fitType:
            # An afw interpolation
            numBins = order
            #
            # numpy.histogram needs a real array for the mask, but numpy.ma "optimises" the case
            # no-values-are-masked by replacing the mask array by a scalar, numpy.ma.nomask
            #
            # Issue DM-415
            #
            collapsedMask = collapsed.mask
            try:
                if collapsedMask == numpy.ma.nomask:
                    collapsedMask = numpy.array(len(collapsed)*[numpy.ma.nomask])
            except ValueError:      # If collapsedMask is an array the test fails [needs .all()]
                pass

            numPerBin, binEdges = numpy.histogram(indices, bins=numBins,
                                                  weights=1-collapsedMask.astype(int))
            # Binning is just a histogram, with weights equal to the values.
            # Use a similar trick to get the bin centers (this deals with different numbers per bin).
            with numpy.errstate(invalid="ignore"):  # suppress NAN warnings
                values = numpy.histogram(indices, bins=numBins,
                                         weights=collapsed.data*~collapsedMask)[0]/numPerBin
                binCenters = numpy.histogram(indices, bins=numBins,
                                             weights=indices*~collapsedMask)[0]/numPerBin
                interp = afwMath.makeInterpolate(binCenters.astype(float)[numPerBin > 0],
                                                 values.astype(float)[numPerBin > 0],
                                                 afwMath.stringToInterpStyle(fitType))
            fitBiasArr = numpy.array([interp.interpolate(i) for i in indices])

        import lsstDebug
        if lsstDebug.Info(__name__).display:
            import matplotlib.pyplot as plot
            figure = plot.figure(1)
            figure.clear()
            axes = figure.add_axes((0.1, 0.1, 0.8, 0.8))
            axes.plot(indices[~collapsedMask], collapsed[~collapsedMask], 'k+')
            if collapsedMask.sum() > 0:
                axes.plot(indices[collapsedMask], collapsed.data[collapsedMask], 'b+')
            axes.plot(indices, fitBiasArr, 'r-')
            plot.xlabel("centered/scaled position along overscan region")
            plot.ylabel("pixel value/fit value")
            figure.show()
            prompt = "Press Enter or c to continue [chp]... "
            while True:
                ans = input(prompt).lower()
                if ans in ("", "c",):
                    break
                if ans in ("p",):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("h[elp] c[ontinue] p[db]")
            plot.close()

        offImage = ampImage.Factory(ampImage.getDimensions())
        offArray = offImage.getArray()
        overscanFit = afwImage.ImageF(overscanImage.getDimensions())
        overscanArray = overscanFit.getArray()
        if shortInd == 1:
            offArray[:, :] = fitBiasArr[:, numpy.newaxis]
            overscanArray[:, :] = fitBiasArr[:, numpy.newaxis]
        else:
            offArray[:, :] = fitBiasArr[numpy.newaxis, :]
            overscanArray[:, :] = fitBiasArr[numpy.newaxis, :]

        # We don't trust any extrapolation: mask those pixels as SUSPECT
        # This will occur when the top and or bottom edges of the overscan
        # contain saturated values. The values will be extrapolated from
        # the surrounding pixels, but we cannot entirely trust the value of
        # the extrapolation, and will mark the image mask plane to flag the
        # image as such.
        mask = ampMaskedImage.getMask()
        maskArray = mask.getArray() if shortInd == 1 else mask.getArray().transpose()
        suspect = mask.getPlaneBitMask("SUSPECT")
        try:
            if collapsed.mask == numpy.ma.nomask:
                # There is no mask, so the whole array is fine
                pass
        except ValueError:      # If collapsed.mask is an array the test fails [needs .all()]
            for low in range(num):
                if not collapsed.mask[low]:
                    break
            if low > 0:
                maskArray[:low, :] |= suspect
            for high in range(1, num):
                if not collapsed.mask[-high]:
                    break
            if high > 1:
                maskArray[-high:, :] |= suspect

    else:
        raise pexExcept.Exception('%s: %s is an invalid overscan type' % ("overscanCorrection", fitType))
    ampImage -= offImage
    overscanImage -= overscanFit
    return Struct(imageFit=offImage, overscanFit=overscanFit, overscanImage=overscanImage)
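The weighted-histogram trick used for the spline knots above is easier to see in isolation: one histogram counts the unmasked points per bin, and two more accumulate the values and the positions, so dividing by the counts yields per-bin means rather than bin midpoints. A self-contained numpy sketch with toy data:

import numpy

rng = numpy.random.default_rng(42)
indices = numpy.sort(rng.uniform(-1.0, 1.0, 200))      # scaled positions along the overscan
collapsed = 0.5 * indices + rng.normal(0.0, 0.1, 200)  # toy collapsed overscan values
good = numpy.ones(len(collapsed), dtype=bool)          # pretend nothing is masked

numBins = 10
numPerBin, binEdges = numpy.histogram(indices, bins=numBins, weights=good.astype(int))
values = numpy.histogram(indices, bins=numBins, weights=collapsed * good)[0] / numPerBin
binCenters = numpy.histogram(indices, bins=numBins, weights=indices * good)[0] / numPerBin
keep = numPerBin > 0
# binCenters[keep] and values[keep] are the knots that would be passed to
# afwMath.makeInterpolate(..., afwMath.stringToInterpStyle(fitType))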
Example #11
def getDistanceFromFocus(dIcSrc,
                         dCcd,
                         dCcdDims,
                         zemaxFilename,
                         config,
                         plotFilename=None):
    # Focus error is measured by using rms^2 of stars on focus CCDs.
    # If there is a focus error d, rms^2 can be written as
    # rms^2 = rms_atm^2 + rms_opt_0^2 + alpha*d^2,
    # where rms_atm is from the atmosphere and rms_opt_0 is from the optics
    # without any focus error.
    # On the focus CCDs, which have +/-delta offsets, the equation becomes
    # rms_+^2 = rms_atm^2 + rms_opt_0^2 + alpha(d+delta)^2
    # rms_-^2 = rms_atm^2 + rms_opt_0^2 + alpha(d-delta)^2
    # Subtracting these gives rms_+^2 - rms_-^2 = 4*alpha*d*delta, so the
    # focus error is
    # d = (rms_+^2 - rms_-^2)/(4 alpha delta)
    # alpha is determined by ZEMAX simulations. It turns out that alpha is a
    # function of the distance r from the center of the FOV, and the best
    # focus also varies as a function of r, so the focus error can be
    # rewritten as
    # d(r) = (rms_+(r)^2 - rms_-(r)^2)/(4 alpha(r) delta) + d0(r)
    # I take a pair of CCDs on the corner, divide the focus CCDs into radial
    # bins, calculate the focus error d for each radial bin with the alpha
    # and d0 values at that radius, and then take the median of these focus
    # errors over all radial bins and CCD pairs.
    # rms^2 is measured by shape.simple. Although I try to introduce as
    # little measurement bias as possible, some bias still remains. This is
    # corrected by getCorrectedFocusError() at the end, which is a polynomial
    # function derived from calibration data (well-behaved focus sweeps).

    # set up radial bins
    lRadialBinEdges = config.radialBinEdges
    lRadialBinCenters = config.radialBinCenters
    lRadialBinsLowerEdges = lRadialBinEdges[0:-1]
    lRadialBinsUpperEdges = lRadialBinEdges[1:]

    # make selection on data and get rms^2 for each bin, CCD by CCD
    dlRmssq = dict(
    )  # rmssq list for radial bin, which is dictionary for each ccd

    for ccdId in dIcSrc.keys():
        # use only objects classified as PSF candidate
        icSrc = dIcSrc[ccdId][dIcSrc[ccdId].get("hscPipeline_focus_candidate")]

        # prepare for getting distance from center for each object
        ccd = dCcd[ccdId]
        x1, y1 = dCcdDims[ccdId]
        # Get focal plane position in pixels
        # Note that we constructed the zemax values alpha(r), d0(r), and this r is in pixel.
        transform = ccd.getTransformMap().get(
            ccd.makeCameraSys(afwCameraGeom.FOCAL_PLANE))
        uLlc, vLlc = transform.forwardTransform(afwGeom.PointD(0., 0.))
        uLrc, vLrc = transform.forwardTransform(afwGeom.PointD(x1, 0.))
        uUlc, vUlc = transform.forwardTransform(afwGeom.PointD(0., y1))
        uUrc, vUrc = transform.forwardTransform(afwGeom.PointD(x1, y1))

        lDistanceFromCenter = list()
        lRmssq = list()
        for s in icSrc:
            # reject blended objects
            if len(s.getFootprint().getPeaks()) != 1:
                continue

            # calculate distance from center for each objects
            x = s.getX()
            y = s.getY()

            uL = (uLrc - uLlc) / x1 * x + uLlc
            uU = (uUrc - uUlc) / x1 * x + uUlc
            u = (uU - uL) / y1 * y + uL

            vL = (vLrc - vLlc) / x1 * x + vLlc
            vU = (vUrc - vUlc) / x1 * x + vUlc
            v = (vU - vL) / y1 * y + vL
            lDistanceFromCenter.append(np.sqrt(u**2 + v**2))

            # calculate rms^2
            ixx = s.get(config.shape + "_xx")
            iyy = s.get(config.shape + "_yy")
            lRmssq.append((ixx + iyy) *
                          config.pixelScale**2)  # convert from pixel^2 to mm^2

        # calculate median rms^2 for each radial bin
        lDistanceFromCenter = np.array(lDistanceFromCenter)
        lRmssq = np.array(lRmssq)
        lRmssqMedian = list()
        for radialBinLowerEdge, radialBinUpperEdge in zip(
                lRadialBinsLowerEdges, lRadialBinsUpperEdges):
            sel = np.logical_and(lDistanceFromCenter > radialBinLowerEdge,
                                 lDistanceFromCenter < radialBinUpperEdge)
            lRmssqMedian.append(np.median(lRmssq[sel]))
        dlRmssq[ccdId] = np.ma.masked_array(lRmssqMedian,
                                            mask=np.isnan(lRmssqMedian))

    # get ZEMAX values
    d = np.loadtxt(zemaxFilename)

    interpStyle = afwMath.stringToInterpStyle("NATURAL_SPLINE")
    sAlpha = afwMath.makeInterpolate(d[:, 0], d[:, 1], interpStyle).interpolate
    sD0 = afwMath.makeInterpolate(d[:, 0], d[:, 2], interpStyle).interpolate

    # calculate rms^2 for each CCD pair
    lCcdPairs = zip(config.belowList, config.aboveList)
    llFocurErrors = list()
    for ccdPair in lCcdPairs:
        lFocusErrors = list()
        if (ccdPair[0] not in dlRmssq or ccdPair[1] not in dlRmssq
                or dlRmssq[ccdPair[0]] is None or dlRmssq[ccdPair[1]] is None):
            continue
        for i, radialBinCenter in enumerate(lRadialBinCenters):
            rmssqAbove = dlRmssq[ccdPair[1]][i]
            rmssqBelow = dlRmssq[ccdPair[0]][i]
            rmssqDiff = rmssqAbove - rmssqBelow
            delta = getFocusCcdOffset(ccdPair[1], config)
            alpha = sAlpha(radialBinCenter)
            focusError = rmssqDiff / 4. / alpha / delta + sD0(radialBinCenter)
            lFocusErrors.append(focusError)
        llFocurErrors.append(np.array(lFocusErrors))

    llFocurErrors = np.ma.masked_array(llFocurErrors,
                                       mask=np.isnan(llFocurErrors))
    reconstructedFocusError = np.ma.median(llFocurErrors)
    n = np.sum(np.invert(llFocurErrors.mask))
    reconstructedFocusErrorStd = np.ma.std(llFocurErrors) * np.sqrt(
        np.pi / 2.) / np.sqrt(n)

    if config.doPlot:
        if not plotFilename:
            raise ValueError("no filename for focus plot")
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        lMarker = ["o", "x", "d", "^", "<", ">"]
        lColor = ["blue", "green", "red", "cyan", "magenta", "yellow"]
        for i, ccdPair in enumerate(lCcdPairs):
            delta_plot = np.ma.masked_array([
                getFocusCcdOffset(ccdPair[0], config),
                getFocusCcdOffset(ccdPair[1], config)
            ])
            rmssq_plot = np.ma.masked_array(
                [dlRmssq[ccdPair[0]], dlRmssq[ccdPair[1]]])
            for j in range(len(lRadialBinCenters)):
                plt.plot(delta_plot,
                         rmssq_plot[:, j],
                         "%s--" % lMarker[i],
                         color=lColor[j])
        plt.savefig(plotFilename)

    correctedFocusError, correctedFocusErrorStd = getCorrectedFocusError(
        reconstructedFocusError, reconstructedFocusErrorStd, config.corrCoeff)
    return (correctedFocusError[0], correctedFocusErrorStd[0],
            reconstructedFocusError[0], reconstructedFocusErrorStd, n)
Example #12
def referenceImage(image, detector, linearityType, inputData, table=None):
    """Generate a reference linearization.

    Parameters
    ----------
    image: `lsst.afw.image.Image`
        Image to linearize.
    detector: `lsst.afw.cameraGeom.Detector`
        Detector this image is from.
    linearityType: `str`
        Type of linearity to apply.
    inputData: `numpy.array`
        An array of values for the linearity correction.
    table: `numpy.array`, optional
        An optional lookup table to use.

    Returns
    -------
    outImage: `lsst.afw.image.Image`
        The output linearized image.
    numOutOfRange: `int`
        The number of values that could not be linearized.

    Raises
    ------
    RuntimeError
        Raised if an invalid linearityType is supplied.
    """
    numOutOfRange = 0
    for ampIdx, amp in enumerate(detector.getAmplifiers()):
        ampIdx = (ampIdx // 3, ampIdx % 3)
        bbox = amp.getBBox()
        imageView = image.Factory(image, bbox)

        if linearityType == 'Squared':
            sqCoeff = inputData[ampIdx]
            array = imageView.getArray()

            array[:] = array + sqCoeff * array**2
        elif linearityType == 'LookupTable':
            rowInd, colIndOffset = inputData[ampIdx]
            rowInd = int(rowInd)
            tableRow = table[rowInd, :]
            numOutOfRange += applyLookupTable(imageView, tableRow,
                                              colIndOffset)
        elif linearityType == 'Polynomial':
            coeffs = inputData[ampIdx]
            array = imageView.getArray()
            summation = np.zeros_like(array)
            for index, coeff in enumerate(coeffs):
                summation += coeff * np.power(array, (index + 2))
            array += summation
        elif linearityType == 'Spline':
            centers, values = np.split(inputData, 2)  # This uses the full data
            interp = afwMath.makeInterpolate(
                centers.tolist(), values.tolist(),
                afwMath.stringToInterpStyle('AKIMA_SPLINE'))
            array = imageView.getArray()
            delta = interp.interpolate(array.flatten())
            array -= np.array(delta).reshape(array.shape)
        else:
            raise RuntimeError(f"Unknown linearity: {linearityType}")
    return image, numOutOfRange
Example #13
def overscanCorrection(ampMaskedImage,
                       overscanImage,
                       fitType='MEDIAN',
                       order=1,
                       collapseRej=3.0,
                       statControl=None,
                       overscanIsInt=True):
    """Apply overscan correction in place.

    Parameters
    ----------
    ampMaskedImage : `lsst.afw.image.MaskedImage`
        Image of amplifier to correct; modified.
    overscanImage : `lsst.afw.image.Image` or `lsst.afw.image.MaskedImage`
        Image of overscan; modified.
    fitType : `str`
        Type of fit for overscan correction. May be one of:

        - ``MEAN``: use mean of overscan.
        - ``MEANCLIP``: use clipped mean of overscan.
        - ``MEDIAN``: use median of overscan.
        - ``POLY``: fit with ordinary polynomial.
        - ``CHEB``: fit with Chebyshev polynomial.
        - ``LEG``: fit with Legendre polynomial.
        - ``NATURAL_SPLINE``: fit with natural spline.
        - ``CUBIC_SPLINE``: fit with cubic spline.
        - ``AKIMA_SPLINE``: fit with Akima spline.

    order : `int`
        Polynomial order or number of spline knots; ignored unless
        ``fitType`` indicates a polynomial or spline.
    statControl : `lsst.afw.math.StatisticsControl`
        Statistics control object.  In particular, we pay attention to ``numSigmaClip``.
    overscanIsInt : `bool`
        Treat the overscan region as consisting of integers, even if it's been
        converted to float.  E.g. handle ties properly.

    Returns
    -------
    result : `lsst.pipe.base.Struct`
        Result struct with components:

        - ``imageFit``: Value(s) removed from image (scalar or
            `lsst.afw.image.Image`)
        - ``overscanFit``: Value(s) removed from overscan (scalar or
            `lsst.afw.image.Image`)
        - ``overscanImage``: Overscan corrected overscan region
            (`lsst.afw.image.Image`)

    Raises
    ------
    pexExcept.Exception
        Raised if ``fitType`` is not an allowed value.

    Notes
    -----
    The ``ampMaskedImage`` and ``overscanImage`` are modified, with the fit
    subtracted. Note that the ``overscanImage`` should not be a subimage of
    the ``ampMaskedImage``, to avoid being subtracted twice.

    Debug plots are available for the SPLINE fitTypes by setting the
    `debug.display` for `name` == "lsst.ip.isr.isrFunctions".  These
    plots show the scatter plot of the overscan data (collapsed along
    the perpendicular dimension) as a function of position on the CCD
    (normalized between +/-1).
    """
    ampImage = ampMaskedImage.getImage()
    if statControl is None:
        statControl = afwMath.StatisticsControl()

    numSigmaClip = statControl.getNumSigmaClip()

    if fitType in ('MEAN', 'MEANCLIP'):
        fitType = afwMath.stringToStatisticsProperty(fitType)
        offImage = afwMath.makeStatistics(overscanImage, fitType,
                                          statControl).getValue()
        overscanFit = offImage
    elif fitType in ('MEDIAN', ):
        if overscanIsInt:
            # we need an image with integer pixels to handle ties properly
            if hasattr(overscanImage, "image"):
                imageI = overscanImage.image.convertI()
                overscanImageI = afwImage.MaskedImageI(imageI,
                                                       overscanImage.mask,
                                                       overscanImage.variance)
            else:
                overscanImageI = overscanImage.convertI()
        else:
            overscanImageI = overscanImage

        fitType = afwMath.stringToStatisticsProperty(fitType)
        offImage = afwMath.makeStatistics(overscanImageI, fitType,
                                          statControl).getValue()
        overscanFit = offImage

        if overscanIsInt:
            del overscanImageI
    elif fitType in ('POLY', 'CHEB', 'LEG', 'NATURAL_SPLINE', 'CUBIC_SPLINE',
                     'AKIMA_SPLINE'):
        if hasattr(overscanImage, "getImage"):
            biasArray = overscanImage.getImage().getArray()
            biasArray = numpy.ma.masked_where(
                overscanImage.getMask().getArray() & statControl.getAndMask(),
                biasArray)
        else:
            biasArray = overscanImage.getArray()
        # Fit along the long axis, so collapse along each short row and fit the resulting array
        shortInd = numpy.argmin(biasArray.shape)
        if shortInd == 0:
            # Convert to some 'standard' representation to make things easier
            biasArray = numpy.transpose(biasArray)

        # Do a single round of clipping to weed out CR hits and signal leaking into the overscan
        percentiles = numpy.percentile(biasArray, [25.0, 50.0, 75.0], axis=1)
        medianBiasArr = percentiles[1]
        stdevBiasArr = 0.74 * (percentiles[2] - percentiles[0])  # robust stdev
        diff = numpy.abs(biasArray - medianBiasArr[:, numpy.newaxis])
        biasMaskedArr = numpy.ma.masked_where(
            diff > numSigmaClip * stdevBiasArr[:, numpy.newaxis], biasArray)
        collapsed = numpy.mean(biasMaskedArr, axis=1)
        if collapsed.mask.sum() > 0:
            collapsed.data[collapsed.mask] = numpy.mean(
                biasArray.data[collapsed.mask], axis=1)
        del biasArray, percentiles, stdevBiasArr, diff, biasMaskedArr

        if shortInd == 0:
            collapsed = numpy.transpose(collapsed)

        num = len(collapsed)
        indices = 2.0 * numpy.arange(num) / float(num) - 1.0

        if fitType in ('POLY', 'CHEB', 'LEG'):
            # A numpy polynomial
            poly = numpy.polynomial
            fitter, evaler = {
                "POLY": (poly.polynomial.polyfit, poly.polynomial.polyval),
                "CHEB": (poly.chebyshev.chebfit, poly.chebyshev.chebval),
                "LEG": (poly.legendre.legfit, poly.legendre.legval),
            }[fitType]

            coeffs = fitter(indices, collapsed, order)
            fitBiasArr = evaler(indices, coeffs)
        elif 'SPLINE' in fitType:
            # An afw interpolation
            numBins = order
            #
            # numpy.histogram needs a real array for the mask, but numpy.ma "optimises" the case
            # no-values-are-masked by replacing the mask array by a scalar, numpy.ma.nomask
            #
            # Issue DM-415
            #
            collapsedMask = collapsed.mask
            try:
                if collapsedMask == numpy.ma.nomask:
                    collapsedMask = numpy.array(
                        len(collapsed) * [numpy.ma.nomask])
            except ValueError:  # If collapsedMask is an array the test fails [needs .all()]
                pass

            numPerBin, binEdges = numpy.histogram(indices,
                                                  bins=numBins,
                                                  weights=1 -
                                                  collapsedMask.astype(int))
            # Binning is just a histogram, with weights equal to the values.
            # Use a similar trick to get the bin centers (this deals with different numbers per bin).
            with numpy.errstate(invalid="ignore"):  # suppress NAN warnings
                values = numpy.histogram(
                    indices,
                    bins=numBins,
                    weights=collapsed.data * ~collapsedMask)[0] / numPerBin
                binCenters = numpy.histogram(
                    indices, bins=numBins,
                    weights=indices * ~collapsedMask)[0] / numPerBin
                interp = afwMath.makeInterpolate(
                    binCenters.astype(float)[numPerBin > 0],
                    values.astype(float)[numPerBin > 0],
                    afwMath.stringToInterpStyle(fitType))
            fitBiasArr = numpy.array([interp.interpolate(i) for i in indices])

        import lsstDebug
        if lsstDebug.Info(__name__).display:
            import matplotlib.pyplot as plot
            figure = plot.figure(1)
            figure.clear()
            axes = figure.add_axes((0.1, 0.1, 0.8, 0.8))
            axes.plot(indices[~collapsedMask], collapsed[~collapsedMask], 'k+')
            if collapsedMask.sum() > 0:
                axes.plot(indices[collapsedMask],
                          collapsed.data[collapsedMask], 'b+')
            axes.plot(indices, fitBiasArr, 'r-')
            plot.xlabel("centered/scaled position along overscan region")
            plot.ylabel("pixel value/fit value")
            figure.show()
            prompt = "Press Enter or c to continue [chp]... "
            while True:
                ans = input(prompt).lower()
                if ans in (
                        "",
                        "c",
                ):
                    break
                if ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("h[elp] c[ontinue] p[db]")
            plot.close()

        offImage = ampImage.Factory(ampImage.getDimensions())
        offArray = offImage.getArray()
        overscanFit = afwImage.ImageF(overscanImage.getDimensions())
        overscanArray = overscanFit.getArray()
        if shortInd == 1:
            offArray[:, :] = fitBiasArr[:, numpy.newaxis]
            overscanArray[:, :] = fitBiasArr[:, numpy.newaxis]
        else:
            offArray[:, :] = fitBiasArr[numpy.newaxis, :]
            overscanArray[:, :] = fitBiasArr[numpy.newaxis, :]

        # We don't trust any extrapolation: mask those pixels as SUSPECT
        # This will occur when the top and or bottom edges of the overscan
        # contain saturated values. The values will be extrapolated from
        # the surrounding pixels, but we cannot entirely trust the value of
        # the extrapolation, and will mark the image mask plane to flag the
        # image as such.
        mask = ampMaskedImage.getMask()
        maskArray = mask.getArray() if shortInd == 1 else mask.getArray(
        ).transpose()
        suspect = mask.getPlaneBitMask("SUSPECT")
        try:
            if collapsed.mask == numpy.ma.nomask:
                # There is no mask, so the whole array is fine
                pass
        except ValueError:  # If collapsed.mask is an array the test fails [needs .all()]
            for low in range(num):
                if not collapsed.mask[low]:
                    break
            if low > 0:
                maskArray[:low, :] |= suspect
            for high in range(1, num):
                if not collapsed.mask[-high]:
                    break
            if high > 1:
                maskArray[-high:, :] |= suspect

    else:
        raise pexExcept.Exception('%s: %s is an invalid overscan type' %
                                  ("overscanCorrection", fitType))
    ampImage -= offImage
    overscanImage -= overscanFit
    return Struct(imageFit=offImage,
                  overscanFit=overscanFit,
                  overscanImage=overscanImage)
Example #14
    def run(self, inputPtc, camera, inputDims):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.cp.pipe.PtcDataset`
            Pre-measured PTC dataset.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer`` : `lsst.ip.isr.Linearizer`
                Final linearizer calibration.
            ``outputProvenance`` : `lsst.ip.isr.IsrProvenance`
                Provenance data for the new calibration.

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
            corrImage = uncorrImage + sum_i c_i uncorrImage^(2 + i)
        These `c_i` are defined in terms of the direct polynomial fit:
            meanVector ~ P(x=timeVector) = sum_j k_j x^j
        such that c_(j-2) = -k_j/(k_1^j) in units of DN^(1-j) (c.f.,
        Eq. 37 of 2003.05978). The `config.polynomialOrder` or
        `config.splineKnots` define the maximum order of x^j to fit.
        As k_0 and k_1 are degenerate with bias level and gain, they
        are not included in the non-linearity correction.
        """
        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu),
                             dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        if self.config.linearityType == 'Spline':
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)

        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if (len(inputPtc.expIdMask[ampName]) == 0):
                self.log.warn(
                    f"Mask not found for {ampName} in non-linearity fit. Using all points."
                )
                mask = np.repeat(True, len(inputPtc.expIdMask[ampName]))
            else:
                mask = inputPtc.expIdMask[ampName]

            inputAbscissa = np.array(inputPtc.rawExpTimes[ampName])[mask]
            inputOrdinate = np.array(inputPtc.rawMeans[ampName])[mask]

            # Determine proxy-to-linear-flux transformation
            fluxMask = inputOrdinate < self.config.maxLinearAdu
            lowMask = inputOrdinate > self.config.minLinearAdu
            fluxMask = fluxMask & lowMask
            linearAbscissa = inputAbscissa[fluxMask]
            linearOrdinate = inputOrdinate[fluxMask]

            linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0],
                                                              linearAbscissa,
                                                              linearOrdinate,
                                                              funcPolynomial)
            # Convert this proxy-to-flux fit into an expected linear flux
            linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa

            # Exclude low end outliers
            threshold = self.config.nSigmaClipLinear * np.sqrt(linearOrdinate)
            fluxMask = np.abs(inputOrdinate - linearOrdinate) < threshold
            linearOrdinate = linearOrdinate[fluxMask]
            fitOrdinate = inputOrdinate[fluxMask]
            self.debugFit('linearFit', inputAbscissa, inputOrdinate,
                          linearOrdinate, fluxMask, ampName)
            # Do fits
            if self.config.linearityType in [
                    'Polynomial', 'Squared', 'LookupTable'
            ]:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(
                    polyFit, linearOrdinate, fitOrdinate, funcPolynomial)

                # Truncate the polynomial fit
                k1 = polyFit[1]
                linearityFit = [
                    -coeff / (k1**order) for order, coeff in enumerate(polyFit)
                ]
                significant = np.where(
                    np.abs(linearityFit) > 1e-10, True, False)
                self.log.info(f"Significant polynomial fits: {significant}")

                modelOrdinate = funcPolynomial(polyFit, linearAbscissa)
                self.debugFit('polyFit', linearAbscissa, fitOrdinate,
                              modelOrdinate, None, ampName)

                if self.config.linearityType == 'Squared':
                    linearityFit = [linearityFit[2]]
                elif self.config.linearityType == 'LookupTable':
                    # Use the linear part to get the time at which the signal reaches maxLookupTableAdu DN
                    tMax = (self.config.maxLookupTableAdu -
                            polyFit[0]) / polyFit[1]
                    timeRange = np.linspace(0, tMax,
                                            self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1] * timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has correction

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityFit = [tableIndex, 0]
                    tableIndex += 1
            elif self.config.linearityType in ['Spline']:
                # See discussion in `lsst.ip.isr.linearize.py` before modifying.
                numPerBin, binEdges = np.histogram(linearOrdinate,
                                                   bins=fitOrder)
                with np.errstate(invalid="ignore"):
                    # Algorithm note: With the counts of points per
                    # bin above, the next histogram calculates the
                    # values to put in each bin by weighting each
                    # point by the correction value.
                    values = np.histogram(
                        linearOrdinate,
                        bins=fitOrder,
                        weights=(inputOrdinate[fluxMask] -
                                 linearOrdinate))[0] / numPerBin

                    # After this is done, the binCenters are
                    # calculated by weighting by the value we're
                    # binning over.  This ensures that widely
                    # spaced/poorly sampled data aren't assigned to
                    # the midpoint of the bin (as could be done using
                    # the binEdges above), but to the weighted mean of
                    # the inputs.  Note that both histograms are
                    # scaled by the count per bin to normalize what
                    # the histogram returns (a sum of the points
                    # inside) into an average.
                    binCenters = np.histogram(
                        linearOrdinate, bins=fitOrder,
                        weights=linearOrdinate)[0] / numPerBin
                    values = values[numPerBin > 0]
                    binCenters = binCenters[numPerBin > 0]

                self.debugFit('splineFit', binCenters, np.abs(values), values,
                              None, ampName)
                interp = afwMath.makeInterpolate(
                    binCenters.tolist(), values.tolist(),
                    afwMath.stringToInterpStyle("AKIMA_SPLINE"))
                modelOrdinate = linearOrdinate + interp.interpolate(
                    linearOrdinate)
                self.debugFit('splineFit', linearOrdinate, fitOrdinate,
                              modelOrdinate, None, ampName)

                # If we exclude a lot of points, we may end up with
                # less than fitOrder points.  Pad out the low-flux end
                # to ensure equal lengths.
                if len(binCenters) != fitOrder:
                    padN = fitOrder - len(binCenters)
                    binCenters = np.pad(binCenters, (padN, 0),
                                        'linear_ramp',
                                        end_values=(binCenters.min() - 1.0, ))
                    # This stores the correction, which is zero at low values.
                    values = np.pad(values, (padN, 0))

                # Pack the spline into a single array.
                linearityFit = np.concatenate(
                    (binCenters.tolist(), values.tolist())).tolist()
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
            else:
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
                linearityFit = [0.0]

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = np.array(linearityFit)
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = np.array(polyFit)
            linearizer.fitParamsErr[ampName] = np.array(polyFitErr)
            linearizer.fitChiSq[ampName] = chiSq

            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.getArray()[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(
                linearizer.linearityType[ampName])
            linearizeFunction()(image, **{
                'coeffs': linearizer.linearityCoeffs[ampName],
                'table': linearizer.tableData,
                'log': linearizer.log
            })
            linearizeModel = image.getArray()[0, :]

            self.debugFit('solution', inputOrdinate[fluxMask], linearOrdinate,
                          linearizeModel[fluxMask], None, ampName)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera,
                                  detector=detector,
                                  filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )
Example #15
def getDistanceFromFocus(dIcSrc, dCcd, dCcdDims, zemaxFilename, config, plotFilename=None):
    # Focus error is measured by using rms^2 of stars on focus CCDs.
    # If there is a focus error d, rms^2 can be written as
    # rms^2 = rms_atm^2 + rms_opt_0^2 + alpha*d^2,
    # where rms_atm is from the atmosphere and rms_opt_0 is from the optics
    # without any focus error.
    # On the focus CCDs, which have +/-delta offsets, the equation becomes
    # rms_+^2 = rms_atm^2 + rms_opt_0^2 + alpha(d+delta)^2
    # rms_-^2 = rms_atm^2 + rms_opt_0^2 + alpha(d-delta)^2
    # Subtracting these gives rms_+^2 - rms_-^2 = 4*alpha*d*delta, so the
    # focus error is
    # d = (rms_+^2 - rms_-^2)/(4 alpha delta)
    # alpha is determined by ZEMAX simulations. It turns out that alpha is a
    # function of the distance r from the center of the FOV, and the best
    # focus also varies as a function of r, so the focus error can be
    # rewritten as
    # d(r) = (rms_+(r)^2 - rms_-(r)^2)/(4 alpha(r) delta) + d0(r)
    # I take a pair of CCDs on the corner, divide the focus CCDs into radial
    # bins, calculate the focus error d for each radial bin with the alpha
    # and d0 values at that radius, and then take the median of these focus
    # errors over all radial bins and CCD pairs.
    # rms^2 is measured by shape.simple. Although I try to introduce as
    # little measurement bias as possible, some bias still remains. This is
    # corrected by getCorrectedFocusError() at the end, which is a polynomial
    # function derived from calibration data (well-behaved focus sweeps).

    # set up radial bins
    lRadialBinEdges = config.radialBinEdges
    lRadialBinCenters = config.radialBinCenters
    lRadialBinsLowerEdges = lRadialBinEdges[0:-1]
    lRadialBinsUpperEdges = lRadialBinEdges[1:]

    # make selection on data and get rms^2 for each bin, CCD by CCD
    dlRmssq = dict() # rmssq list for radial bin, which is dictionary for each ccd

    for ccdId in dIcSrc.keys():
        # use only objects classified as PSF candidate
        icSrc = dIcSrc[ccdId][dIcSrc[ccdId].get("hscPipeline_focus_candidate")]

        # prepare for getting distance from center for each object
        ccd = dCcd[ccdId]
        x1, y1 = dCcdDims[ccdId]
        # Get focal plane position in pixels
        # Note that we constructed the zemax values alpha(r), d0(r), and this r is in pixel.
        transform = ccd.getTransformMap().get(ccd.makeCameraSys(afwCameraGeom.FOCAL_PLANE))
        uLlc, vLlc = transform.forwardTransform(afwGeom.PointD(0., 0.))
        uLrc, vLrc = transform.forwardTransform(afwGeom.PointD(x1, 0.))
        uUlc, vUlc = transform.forwardTransform(afwGeom.PointD(0., y1))
        uUrc, vUrc = transform.forwardTransform(afwGeom.PointD(x1, y1))

        lDistanceFromCenter = list()
        lRmssq = list()
        for s in icSrc:
            # reject blended objects
            if len(s.getFootprint().getPeaks()) != 1:
                continue

            # calculate the distance from the center for each object
            x = s.getX()
            y = s.getY()

            uL = (uLrc-uLlc)/x1*x+uLlc
            uU = (uUrc-uUlc)/x1*x+uUlc
            u = (uU-uL)/y1*y+uL

            vL = (vLrc-vLlc)/x1*x+vLlc
            vU = (vUrc-vUlc)/x1*x+vUlc
            v = (vU-vL)/y1*y+vL
            lDistanceFromCenter.append(np.sqrt(u**2 + v**2))

            # calculate rms^2
            ixx = s.get(config.shape + "_xx")
            iyy = s.get(config.shape + "_yy")
            lRmssq.append((ixx + iyy)*config.pixelScale**2) # convert from pixel^2 to mm^2
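            # (config.pixelScale is presumably in mm/pixel, so (ixx + iyy) in pixel^2 times
            #  pixelScale^2 gives mm^2)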

        # calculate median rms^2 for each radial bin
        lDistanceFromCenter = np.array(lDistanceFromCenter)
        lRmssq = np.array(lRmssq)
        lRmssqMedian = list()
        for radialBinLowerEdge, radialBinUpperEdge in zip(lRadialBinsLowerEdges, lRadialBinsUpperEdges):
            sel = np.logical_and(lDistanceFromCenter > radialBinLowerEdge, lDistanceFromCenter < radialBinUpperEdge)
            lRmssqMedian.append(np.median(lRmssq[sel]))
        dlRmssq[ccdId] = np.ma.masked_array(lRmssqMedian, mask = np.isnan(lRmssqMedian))

    # get ZEMAX values
    d = np.loadtxt(zemaxFilename)

    interpStyle = afwMath.stringToInterpStyle("NATURAL_SPLINE")
    sAlpha = afwMath.makeInterpolate(d[:,0], d[:,1], interpStyle).interpolate
    sD0 = afwMath.makeInterpolate(d[:,0], d[:,2], interpStyle).interpolate

    # calculate rms^2 for each CCD pair
    lCcdPairs = list(zip(config.belowList, config.aboveList))
    llFocusErrors = list()
    for ccdPair in lCcdPairs:
        lFocusErrors = list()
        if (ccdPair[0] not in dlRmssq or ccdPair[1] not in dlRmssq or
            dlRmssq[ccdPair[0]] is None or dlRmssq[ccdPair[1]] is None):
            continue
        for i, radialBinCenter in enumerate(lRadialBinCenters):
            rmssqAbove = dlRmssq[ccdPair[1]][i]
            rmssqBelow = dlRmssq[ccdPair[0]][i]
            rmssqDiff = rmssqAbove - rmssqBelow
            delta = getFocusCcdOffset(ccdPair[1], config)
            alpha = sAlpha(radialBinCenter)
            focusError = rmssqDiff/4./alpha/delta + sD0(radialBinCenter)
            lFocusErrors.append(focusError)
        llFocusErrors.append(np.array(lFocusErrors))

    llFocusErrors = np.ma.masked_array(llFocusErrors, mask=np.isnan(llFocusErrors))
    reconstructedFocusError = np.ma.median(llFocusErrors)
    n = np.sum(np.invert(llFocusErrors.mask))
    reconstructedFocusErrorStd = np.ma.std(llFocusErrors)*np.sqrt(np.pi/2.)/np.sqrt(n)
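    # The sqrt(pi/2) factor converts the scatter into an estimate of the standard error of the
    # median, which for Gaussian errors is sqrt(pi/2) times larger than that of the mean.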

    if config.doPlot:
        if not plotFilename:
            raise ValueError("no filename for focus plot")
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        lMarker = ["o", "x", "d", "^", "<", ">"]
        lColor = ["blue", "green", "red", "cyan", "magenta", "yellow"]
        for i, ccdPair in enumerate(lCcdPairs):
            delta_plot = np.ma.masked_array([getFocusCcdOffset(ccdPair[0], config),
                                             getFocusCcdOffset(ccdPair[1], config)])
            rmssq_plot = np.ma.masked_array([dlRmssq[ccdPair[0]], dlRmssq[ccdPair[1]]])
            for j in range(len(lRadialBinCenters)):
                plt.plot(delta_plot, rmssq_plot[:, j], "%s--" % lMarker[i], color = lColor[j])
        plt.savefig(plotFilename)

    correctedFocusError, correctedFocusErrorStd = getCorrectedFocusError(
        reconstructedFocusError, reconstructedFocusErrorStd, config.corrCoeff)
    return (correctedFocusError[0], correctedFocusErrorStd[0],
            reconstructedFocusError[0], reconstructedFocusErrorStd, n)
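
A minimal, self-contained sketch (not part of the example above; the tabulated values below are invented) of the afwMath.makeInterpolate pattern used for the ZEMAX alpha(r) and d0(r) curves: build a NATURAL_SPLINE interpolator from tabulated points and evaluate it at arbitrary radii.

import numpy as np
import lsst.afw.math as afwMath

radii = np.linspace(0.0, 18000.0, 10)          # hypothetical radii in pixels
alpha = 1.0e-3*(1.0 + (radii/18000.0)**2)      # hypothetical alpha(r) table

style = afwMath.stringToInterpStyle("NATURAL_SPLINE")
sAlpha = afwMath.makeInterpolate(radii, alpha, style)
print(sAlpha.interpolate(9000.0))              # evaluate the spline at one radius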
Example #16
def overscanCorrection(ampMaskedImage,
                       overscanImage,
                       fitType='MEDIAN',
                       order=1,
                       collapseRej=3.0,
                       statControl=None):
    """Apply overscan correction in place

    @param[in,out] ampMaskedImage  masked image to correct
    @param[in] overscanImage  overscan data as an afw.image.Image or afw.image.MaskedImage.
                              If a masked image is passed in, its mask plane will be used
                              to constrain the fit of the bias level.
    @param[in] fitType  type of fit for overscan correction; one of:
                        - 'MEAN'
                        - 'MEDIAN'
                        - 'POLY' (ordinary polynomial)
                        - 'CHEB' (Chebyshev polynomial)
                        - 'LEG' (Legendre polynomial)
                        - 'NATURAL_SPLINE', 'CUBIC_SPLINE', 'AKIMA_SPLINE' (splines)
    @param[in] order  polynomial order or spline knots (ignored unless fitType
                      indicates a polynomial or spline)
    @param[in] collapseRej  Rejection threshold (sigma) for collapsing dimension of overscan
    @param[in] statControl  Statistics control object
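
    (A pure-numpy sketch of the polynomial branch follows this function.)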
    """
    ampImage = ampMaskedImage.getImage()
    if statControl is None:
        statControl = afwMath.StatisticsControl()
    if fitType == 'MEAN':
        offImage = afwMath.makeStatistics(overscanImage, afwMath.MEAN,
                                          statControl).getValue(afwMath.MEAN)
    elif fitType == 'MEDIAN':
        offImage = afwMath.makeStatistics(overscanImage, afwMath.MEDIAN,
                                          statControl).getValue(afwMath.MEDIAN)
    elif fitType in ('POLY', 'CHEB', 'LEG', 'NATURAL_SPLINE', 'CUBIC_SPLINE',
                     'AKIMA_SPLINE'):
        if hasattr(overscanImage, "getImage"):
            biasArray = overscanImage.getImage().getArray()
            biasArray = numpy.ma.masked_where(
                overscanImage.getMask().getArray() & statControl.getAndMask(),
                biasArray)
        else:
            biasArray = overscanImage.getArray()
        # Fit along the long axis, so collapse along each short row and fit the resulting array
        shortInd = numpy.argmin(biasArray.shape)
        if shortInd == 0:
            # Convert to some 'standard' representation to make things easier
            biasArray = numpy.transpose(biasArray)

        # Do a single round of clipping to weed out CR hits and signal leaking into the overscan
        percentiles = numpy.percentile(biasArray, [25.0, 50.0, 75.0], axis=1)
        medianBiasArr = percentiles[1]
        stdevBiasArr = 0.74 * (percentiles[2] - percentiles[0])  # robust stdev
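        # (for Gaussian noise, sigma ~= (Q75 - Q25)/1.349 ~= 0.74*IQR, hence the 0.74 factor)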
        diff = numpy.abs(biasArray - medianBiasArr[:, numpy.newaxis])
        biasMaskedArr = numpy.ma.masked_where(
            diff > collapseRej * stdevBiasArr[:, numpy.newaxis], biasArray)
        collapsed = numpy.mean(biasMaskedArr, axis=1)
        if collapsed.mask.sum() > 0:
            collapsed.data[collapsed.mask] = numpy.mean(
                biasArray.data[collapsed.mask], axis=1)
        del biasArray, percentiles, stdevBiasArr, diff, biasMaskedArr

        if shortInd == 0:
            collapsed = numpy.transpose(collapsed)

        num = len(collapsed)
        indices = 2.0 * numpy.arange(num) / float(num) - 1.0

        if fitType in ('POLY', 'CHEB', 'LEG'):
            # A numpy polynomial
            poly = numpy.polynomial
            fitter, evaler = {
                "POLY": (poly.polynomial.polyfit, poly.polynomial.polyval),
                "CHEB": (poly.chebyshev.chebfit, poly.chebyshev.chebval),
                "LEG": (poly.legendre.legfit, poly.legendre.legval),
            }[fitType]

            coeffs = fitter(indices, collapsed, order)
            fitBiasArr = evaler(indices, coeffs)
        elif 'SPLINE' in fitType:
            # An afw interpolation
            numBins = order
            #
            # numpy.histogram needs a real array for the mask, but numpy.ma "optimises" the case
            # no-values-are-masked by replacing the mask array by a scalar, numpy.ma.nomask
            #
            # Issue DM-415
            #
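            # (if collapsedMask is a boolean array, "collapsedMask == numpy.ma.nomask" is an
            # element-wise array whose truth value is ambiguous, hence the except ValueError below)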
            collapsedMask = collapsed.mask
            try:
                if collapsedMask == numpy.ma.nomask:
                    collapsedMask = numpy.array(
                        len(collapsed) * [numpy.ma.nomask])
            except ValueError:  # If collapsedMask is an array the test fails [needs .all()]
                pass

            numPerBin, binEdges = numpy.histogram(indices, bins=numBins,
                                                  weights=1 - collapsedMask.astype(int))
            # Binning is just a histogram, with weights equal to the values.
            # Use a similar trick to get the bin centers (this deals with different numbers per bin).
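            # For example (hypothetical, unmasked values): with bins=2, indices [-1, -0.5, 0, 0.5]
            # and collapsed [10, 12, 20, 22], numPerBin = [2, 2], values = [11, 21] and
            # binCenters = [-0.75, 0.25].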
            values = numpy.histogram(
                indices, bins=numBins,
                weights=collapsed.data * ~collapsedMask)[0] / numPerBin
            binCenters = numpy.histogram(
                indices, bins=numBins,
                weights=indices * ~collapsedMask)[0] / numPerBin
            interp = afwMath.makeInterpolate(
                binCenters.astype(float)[numPerBin > 0],
                values.astype(float)[numPerBin > 0],
                afwMath.stringToInterpStyle(fitType))
            fitBiasArr = numpy.array([interp.interpolate(i) for i in indices])

        import lsstDebug
        if lsstDebug.Info(__name__).display:
            import matplotlib.pyplot as plot
            figure = plot.figure(1)
            figure.clear()
            axes = figure.add_axes((0.1, 0.1, 0.8, 0.8))
            axes.plot(indices[~collapsedMask], collapsed[~collapsedMask], 'k+')
            if collapsedMask.sum() > 0:
                axes.plot(indices[collapsedMask],
                          collapsed.data[collapsedMask], 'b+')
            axes.plot(indices, fitBiasArr, 'r-')
            figure.show()
            prompt = "Press Enter or c to continue [chp]... "
            while True:
                ans = input(prompt).lower()
                if ans in ("", "c"):
                    break
                if ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("h[elp] c[ontinue] p[db]")
                plot.close(figure)

        offImage = ampImage.Factory(ampImage.getDimensions())
        offArray = offImage.getArray()
        if shortInd == 1:
            offArray[:, :] = fitBiasArr[:, numpy.newaxis]
        else:
            offArray[:, :] = fitBiasArr[numpy.newaxis, :]

        # We don't trust any extrapolation: mask those pixels as SUSPECT
        # This will occur when the top and or bottom edges of the overscan
        # contain saturated values. The values will be extrapolated from
        # the surrounding pixels, but we cannot entirely trust the value of
        # the extrapolation, and will mark the image mask plane to flag the
        # image as such.
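        # (e.g. if the first three collapsed rows were fully clipped, rows 0-2 of the amplifier,
        #  across the whole serial direction, receive the SUSPECT bit)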
        mask = ampMaskedImage.getMask()
        maskArray = mask.getArray() if shortInd == 1 else mask.getArray().transpose()
        suspect = mask.getPlaneBitMask("SUSPECT")
        try:
            if collapsed.mask == numpy.ma.nomask:
                # There is no mask, so the whole array is fine
                pass
        except ValueError:  # If collapsed.mask is an array the test fails [needs .all()]
            for low in range(num):
                if not collapsed.mask[low]:
                    break
            if low > 0:
                maskArray[:low, :] |= suspect
            for high in range(1, num):
                if not collapsed.mask[-high]:
                    break
            if high > 1:
                maskArray[-high:, :] |= suspect

    else:
        raise pexExcept.Exception("overscanCorrection: %r is an invalid overscan type" % fitType)
    ampImage -= offImage
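
A minimal numpy-only sketch of the polynomial ('POLY'/'CHEB'/'LEG') branch above, using a synthetic overscan array; the array shape, noise level and order are invented for illustration and none of this is LSST API.

import numpy

overscan = numpy.random.normal(1000.0, 5.0, size=(2048, 64))   # rows x overscan columns
collapsed = numpy.mean(overscan, axis=1)                       # collapse the short axis
num = len(collapsed)
indices = 2.0*numpy.arange(num)/float(num) - 1.0               # map row index to [-1, 1)

coeffs = numpy.polynomial.chebyshev.chebfit(indices, collapsed, 3)   # 'CHEB' fit, order=3
fitBias = numpy.polynomial.chebyshev.chebval(indices, coeffs)

corrected = overscan - fitBias[:, numpy.newaxis]               # subtract the row-wise bias level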
Example #17
def overscanCorrection(ampMaskedImage, overscanImage, fitType='MEDIAN', order=1, collapseRej=3.0,
                       statControl=None):
    """Apply overscan correction in place

    @param[in,out] ampMaskedImage  masked image to correct
    @param[in] overscanImage  overscan data as an afw.image.Image or afw.image.MaskedImage.
                              If a masked image is passed in, its mask plane will be used
                              to constrain the fit of the bias level.
    @param[in] fitType  type of fit for overscan correction; one of:
                        - 'MEAN'
                        - 'MEDIAN'
                        - 'POLY' (ordinary polynomial)
                        - 'CHEB' (Chebyshev polynomial)
                        - 'LEG' (Legendre polynomial)
                        - 'NATURAL_SPLINE', 'CUBIC_SPLINE', 'AKIMA_SPLINE' (splines)
    @param[in] order  polynomial order or spline knots (ignored unless fitType
                      indicates a polynomial or spline)
    @param[in] collapseRej  Rejection threshold (sigma) for collapsing dimension of overscan
    @param[in] statControl  Statistics control object
    """
    ampImage = ampMaskedImage.getImage()
    if statControl is None:
        statControl = afwMath.StatisticsControl()
    if fitType == 'MEAN':
        offImage = afwMath.makeStatistics(overscanImage, afwMath.MEAN, statControl).getValue(afwMath.MEAN)
    elif fitType == 'MEDIAN':
        offImage = afwMath.makeStatistics(overscanImage, afwMath.MEDIAN, statControl).getValue(afwMath.MEDIAN)
    elif fitType in ('POLY', 'CHEB', 'LEG', 'NATURAL_SPLINE', 'CUBIC_SPLINE', 'AKIMA_SPLINE'):
        if hasattr(overscanImage, "getImage"):
            biasArray = overscanImage.getImage().getArray()
            biasArray = numpy.ma.masked_where(overscanImage.getMask().getArray() & statControl.getAndMask(),
                                              biasArray)
        else:
            biasArray = overscanImage.getArray()
        # Fit along the long axis, so collapse along each short row and fit the resulting array
        shortInd = numpy.argmin(biasArray.shape)
        if shortInd == 0:
            # Convert to some 'standard' representation to make things easier
            biasArray = numpy.transpose(biasArray)

        # Do a single round of clipping to weed out CR hits and signal leaking into the overscan
        percentiles = numpy.percentile(biasArray, [25.0, 50.0, 75.0], axis=1)
        medianBiasArr = percentiles[1]
        stdevBiasArr = 0.74*(percentiles[2] - percentiles[0]) # robust stdev
        diff = numpy.abs(biasArray - medianBiasArr[:,numpy.newaxis])
        biasMaskedArr = numpy.ma.masked_where(diff > collapseRej*stdevBiasArr[:,numpy.newaxis], biasArray)
        collapsed = numpy.mean(biasMaskedArr, axis=1)
        del biasArray, percentiles, stdevBiasArr, diff, biasMaskedArr

        if shortInd == 0:
            collapsed = numpy.transpose(collapsed)

        num = len(collapsed)
        indices = 2.0*numpy.arange(num)/float(num) - 1.0

        if fitType in ('POLY', 'CHEB', 'LEG'):
            # A numpy polynomial
            poly = numpy.polynomial
            fitter, evaler = {"POLY": (poly.polynomial.polyfit, poly.polynomial.polyval),
                              "CHEB": (poly.chebyshev.chebfit, poly.chebyshev.chebval),
                              "LEG":  (poly.legendre.legfit, poly.legendre.legval),
                              }[fitType]

            coeffs = fitter(indices, collapsed, order)
            fitBiasArr = evaler(indices, coeffs)
        elif 'SPLINE' in fitType:
            # An afw interpolation
            numBins = order
            #
            # numpy.histogram needs a real array for the mask, but numpy.ma "optimises" the case
            # no-values-are-masked by replacing the mask array by a scalar, numpy.ma.nomask
            #
            # Issue DM-415
            #
            collapsedMask = collapsed.mask
            try:
                if collapsedMask == numpy.ma.nomask:
                    collapsedMask = numpy.array(len(collapsed)*[numpy.ma.nomask])
            except ValueError:      # If collapsedMask is an array the test fails [needs .all()]
                pass

            numPerBin, binEdges = numpy.histogram(indices, bins=numBins,
                                                  weights=1-collapsedMask.astype(int))
            # Binning is just a histogram, with weights equal to the values.
            # Use a similar trick to get the bin centers (this deals with different numbers per bin).
            values = numpy.histogram(indices, bins=numBins, weights=collapsed)[0]/numPerBin
            binCenters = numpy.histogram(indices, bins=numBins, weights=indices)[0]/numPerBin
            interp = afwMath.makeInterpolate(binCenters.astype(float)[numPerBin > 0],
                                             values.astype(float)[numPerBin > 0],
                                             afwMath.stringToInterpStyle(fitType))
            fitBiasArr = numpy.array([interp.interpolate(i) for i in indices])

        import lsstDebug
        if lsstDebug.Info(__name__).display:
            import matplotlib.pyplot as plot
            figure = plot.figure(1)
            figure.clear()
            axes = figure.add_axes((0.1, 0.1, 0.8, 0.8))
            axes.plot(indices, collapsed, 'k+')
            axes.plot(indices, fitBiasArr, 'r-')
            figure.show()
            prompt = "Press Enter or c to continue [chp]... "
            while True:
                ans = input(prompt).lower()
                if ans in ("", "c",):
                    break
                if ans in ("p",):
                    import pdb; pdb.set_trace()
                elif ans in ("h", ):
                    print "h[elp] c[ontinue] p[db]"
                figure.close()

        offImage = ampImage.Factory(ampImage.getDimensions())
        offArray = offImage.getArray()
        if shortInd == 1:
            offArray[:,:] = fitBiasArr[:,numpy.newaxis]
        else:
            offArray[:,:] = fitBiasArr[numpy.newaxis,:]

        # We don't trust any extrapolation: mask those pixels as SUSPECT
        # This will occur when the top and or bottom edges of the overscan
        # contain saturated values. The values will be extrapolated from
        # the surrounding pixels, but we cannot entirely trust the value of
        # the extrapolation, and will mark the image mask plane to flag the
        # image as such.
        mask = ampMaskedImage.getMask()
        maskArray = mask.getArray() if shortInd == 1 else mask.getArray().transpose()
        suspect = mask.getPlaneBitMask("SUSPECT")
        try:
            if collapsed.mask == numpy.ma.nomask:
                # There is no mask, so the whole array is fine
                pass
        except ValueError:      # If collapsed.mask is an array the test fails [needs .all()]
            for low in range(num):
                if not collapsed.mask[low]:
                    break
            if low > 0:
                maskArray[:low,:] |= suspect
            for high in range(1, num):
                if not collapsed.mask[-high]:
                    break
            if high > 1:
                maskArray[-high:,:] |= suspect

    else:
        raise pexExcept.Exception("overscanCorrection: %r is an invalid overscan type" % fitType)
    ampImage -= offImage