Example #1
    def setUp(self):
        self.calib = IsrProvenance(detectorName='test_calibType Det00',
                                   detectorSerial='Det00',
                                   calibType="Test Calib")
        self.calib.updateMetadata()
        self.calib.fromDataIds([{'exposure': 1234, 'detector': 0, 'filter': 'G'},
                                {'exposure': 1235, 'detector': 0, 'filter': 'G'},
                                {'exposure': 1234, 'detector': 1, 'filter': 'G'},
                                {'exposure': 1235, 'detector': 1, 'filter': 'G'}])
Example #2
    def test_Fits(self):
        filename = tempfile.mktemp()
        usedFilename = self.calib.writeFits(filename + '.fits')
        fromFits = IsrProvenance.readFits(usedFilename)
        self.assertEqual(self.calib, fromFits)

        fromFits.updateMetadata(setDate=True)
        self.assertNotEqual(self.calib, fromFits)
Example #3
    def runText(self, textType):
        filename = tempfile.mktemp()
        usedFilename = self.calib.writeText(filename + textType)
        fromText = IsrProvenance.readText(usedFilename)
        self.assertEqual(self.calib, fromText)

        # Test generic interface:
        fromGeneric = IsrCalib.readText(usedFilename)
        self.assertEqual(self.calib, fromGeneric)
Example #4
class IsrCalibCases(lsst.utils.tests.TestCase):
    """Test unified calibration type.
    """
    def setUp(self):
        self.calib = IsrProvenance(detectorName='test_calibType Det00',
                                   detectorSerial='Det00',
                                   calibType="Test Calib")
        self.calib.updateMetadata()
        self.calib.fromDataIds([{'exposure': 1234, 'detector': 0, 'filter': 'G'},
                                {'exposure': 1235, 'detector': 0, 'filter': 'G'},
                                {'exposure': 1234, 'detector': 1, 'filter': 'G'},
                                {'exposure': 1235, 'detector': 1, 'filter': 'G'}])

    def runText(self, textType):
        filename = tempfile.mktemp()
        usedFilename = self.calib.writeText(filename + textType)
        fromText = IsrProvenance.readText(usedFilename)
        self.assertEqual(self.calib, fromText)

    def test_Text(self):
        self.runText('.yaml')
        self.runText('.ecsv')

    def test_Fits(self):
        filename = tempfile.mktemp()
        usedFilename = self.calib.writeFits(filename + '.fits')
        fromFits = IsrProvenance.readFits(usedFilename)
        self.assertEqual(self.calib, fromFits)

        fromFits.updateMetadata(setDate=True)
        self.assertNotEqual(self.calib, fromFits)
Example #5
    def run(self, inputRatios, inputFluxes=None, camera=None, inputDims=None, outputDims=None):
        """Combine ratios to produce crosstalk coefficients.

        Parameters
        ----------
        inputRatios : `list` [`dict` [`dict` [`dict` [`dict` [`list`]]]]]
            A list of nested dictionaries of ratios indexed by target
            and source chip, then by target and source amplifier.
        inputFluxes : `list` [`dict` [`dict` [`list`]]]
            A list of nested dictionaries of source pixel fluxes, indexed
            by source chip and amplifier.
        camera : `lsst.afw.cameraGeom.Camera`
            Input camera.
        inputDims : `list` [`lsst.daf.butler.DataCoordinate`]
            DataIds to use to construct provenance.
        outputDims : `list` [`lsst.daf.butler.DataCoordinate`]
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputCrosstalk`` : `lsst.ip.isr.CrosstalkCalib`
                Final crosstalk calibration.
            ``outputProvenance`` : `lsst.ip.isr.IsrProvenance`
                Provenance data for the new calibration.

        Raises
        ------
        RuntimeError
            Raised if the input data contains multiple target detectors.

        Notes
        -----
        The lsstDebug.Info() method can be rewritten for __name__ =
        `lsst.ip.isr.measureCrosstalk`, and supports the parameters:

        debug.display['reduce'] : `bool`
            Display a histogram of the combined ratio measurements for
            a pair of source/target amplifiers from all input
            exposures/detectors.

        """
        if outputDims:
            calibChip = outputDims['detector']
            instrument = outputDims['instrument']
        else:
            # calibChip needs to be set manually in Gen2.
            calibChip = None
            instrument = None

        self.log.info("Combining measurements from %d ratios and %d fluxes",
                      len(inputRatios), len(inputFluxes) if inputFluxes else 0)

        if inputFluxes is None:
            inputFluxes = [None for exp in inputRatios]

        combinedRatios = defaultdict(lambda: defaultdict(list))
        combinedFluxes = defaultdict(lambda: defaultdict(list))
        for ratioDict, fluxDict in zip(inputRatios, inputFluxes):
            for targetChip in ratioDict:
                if calibChip and targetChip != calibChip:
                    raise RuntimeError("Received multiple target chips!")

                sourceChip = targetChip
                if sourceChip in ratioDict[targetChip]:
                    ratios = ratioDict[targetChip][sourceChip]

                    for targetAmp in ratios:
                        for sourceAmp in ratios[targetAmp]:
                            combinedRatios[targetAmp][sourceAmp].extend(ratios[targetAmp][sourceAmp])
                            if fluxDict:
                                combinedFluxes[targetAmp][sourceAmp].extend(fluxDict[sourceChip][sourceAmp])
                # TODO: DM-21904
                # Iterating over all other entries in ratioDict[targetChip] will yield
                # inter-chip terms.

        for targetAmp in combinedRatios:
            for sourceAmp in combinedRatios[targetAmp]:
                self.log.info("Read %d pixels for %s -> %s",
                              len(combinedRatios[targetAmp][sourceAmp]),
                              targetAmp, sourceAmp)
                if len(combinedRatios[targetAmp][sourceAmp]) > 1:
                    self.debugRatios('reduce', combinedRatios, targetAmp, sourceAmp)

        if self.config.fluxOrder == 0:
            self.log.info("Fitting crosstalk coefficients.")
            calib = self.measureCrosstalkCoefficients(combinedRatios,
                                                      self.config.rejIter, self.config.rejSigma)
        else:
            raise NotImplementedError("Non-linear crosstalk terms are not yet supported.")

        self.log.info("Number of valid coefficients: %d", np.sum(calib.coeffValid))

        if self.config.doFiltering:
            # This step will apply the calculated validity values to
            # censor poorly measured coefficients.
            self.log.info("Filtering measured crosstalk to remove invalid solutions.")
            calib = self.filterCrosstalkCalib(calib)

        # Populate the remainder of the calibration information.
        calib.hasCrosstalk = True
        calib.interChip = {}

        # calibChip is the detector dimension, which is the detector Id
        calib._detectorId = calibChip
        if camera:
            calib._detectorName = camera[calibChip].getName()
            calib._detectorSerial = camera[calibChip].getSerial()

        calib._instrument = instrument
        calib.updateMetadata(setCalibId=True, setDate=True)

        # Make an IsrProvenance().
        provenance = IsrProvenance(calibType="CROSSTALK")
        provenance._detectorName = calibChip
        if inputDims:
            provenance.fromDataIds(inputDims)
            provenance._instrument = instrument
        provenance.updateMetadata()

        return pipeBase.Struct(
            outputCrosstalk=calib,
            outputProvenance=provenance,
        )
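
For orientation, here is a minimal sketch of the nested containers that the run() method above consumes, matching the indexing used in its combining loop (ratioDict[targetChip][sourceChip][targetAmp][sourceAmp] and fluxDict[sourceChip][sourceAmp]); the chip and amplifier names and all numbers are hypothetical.

# Hypothetical inputs for the run() method above; chip names ('DET00'),
# amplifier names ('C00', 'C01'), and all values are made up.
inputRatios = [
    {  # one dict per input exposure
        'DET00': {            # target chip
            'DET00': {        # source chip (intra-chip terms only)
                'C00': {      # target amplifier
                    'C01': [0.0011, 0.0009, 0.0010],  # ratios from source amp C01
                },
                'C01': {
                    'C00': [0.0008, 0.0012, 0.0010],
                },
            },
        },
    },
]
inputFluxes = [
    {  # one dict per input exposure
        'DET00': {                               # source chip
            'C00': [21000.0, 35000.0, 52000.0],  # source-amp pixel fluxes
            'C01': [20500.0, 34800.0, 51700.0],
        },
    },
]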
Example #6
    def runText(self, textType):
        filename = tempfile.mktemp()
        usedFilename = self.calib.writeText(filename + textType)
        fromText = IsrProvenance.readText(usedFilename)
        self.assertEqual(self.calib, fromText)
Example #7
    def run(self, inputPtc, camera, inputDims):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.cp.pipe.PtcDataset`
            Pre-measured PTC dataset.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer`` : `lsst.ip.isr.Linearizer`
                Final linearizer calibration.
            ``outputProvenance`` : `lsst.ip.isr.IsrProvenance`
                Provenance data for the new calibration.

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
            corrImage = uncorrImage + sum_i c_i uncorrImage^(2 + i)
        These `c_i` are defined in terms of the direct polynomial fit:
            meanVector ~ P(x=timeVector) = sum_j k_j x^j
        such that c_(j-2) = -k_j/(k_1^j) in units of DN^(1-j) (cf.
        Eq. 37 of arXiv:2003.05978). `config.polynomialOrder` sets the
        maximum order of x^j to fit; for spline fits, `config.splineKnots`
        instead sets the number of knots.
        As k_0 and k_1 are degenerate with bias level and gain, they
        are not included in the non-linearity correction.
        """
        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            table = np.zeros((len(detector), self.config.maxLookupTableAdu),
                             dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        if self.config.linearityType == 'Spline':
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)

        for i, amp in enumerate(detector):
            ampName = amp.getName()
            if (len(inputPtc.expIdMask[ampName]) == 0):
                self.log.warn(
                    f"Mask not found for {ampName} in non-linearity fit. Using all points."
                )
                mask = np.repeat(True, len(inputPtc.rawExpTimes[ampName]))
            else:
                mask = inputPtc.expIdMask[ampName]

            inputAbscissa = np.array(inputPtc.rawExpTimes[ampName])[mask]
            inputOrdinate = np.array(inputPtc.rawMeans[ampName])[mask]

            # Determine proxy-to-linear-flux transformation
            fluxMask = inputOrdinate < self.config.maxLinearAdu
            lowMask = inputOrdinate > self.config.minLinearAdu
            fluxMask = fluxMask & lowMask
            linearAbscissa = inputAbscissa[fluxMask]
            linearOrdinate = inputOrdinate[fluxMask]

            linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0],
                                                              linearAbscissa,
                                                              linearOrdinate,
                                                              funcPolynomial)
            # Convert this proxy-to-flux fit into an expected linear flux
            linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa

            # Exclude low end outliers
            threshold = self.config.nSigmaClipLinear * np.sqrt(linearOrdinate)
            fluxMask = np.abs(inputOrdinate - linearOrdinate) < threshold
            linearOrdinate = linearOrdinate[fluxMask]
            fitOrdinate = inputOrdinate[fluxMask]
            self.debugFit('linearFit', inputAbscissa, inputOrdinate,
                          linearOrdinate, fluxMask, ampName)
            # Do fits
            if self.config.linearityType in [
                    'Polynomial', 'Squared', 'LookupTable'
            ]:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(
                    polyFit, linearOrdinate, fitOrdinate, funcPolynomial)

                # Truncate the polynomial fit
                k1 = polyFit[1]
                linearityFit = [
                    -coeff / (k1**order) for order, coeff in enumerate(polyFit)
                ]
                significant = np.where(
                    np.abs(linearityFit) > 1e-10, True, False)
                self.log.info(f"Significant polynomial fits: {significant}")

                modelOrdinate = funcPolynomial(polyFit, linearAbscissa)
                self.debugFit('polyFit', linearAbscissa, fitOrdinate,
                              modelOrdinate, None, ampName)

                if self.config.linearityType == 'Squared':
                    linearityFit = [linearityFit[2]]
                elif self.config.linearityType == 'LookupTable':
                    # Use the linear part to find the time at which the
                    # signal reaches self.config.maxLookupTableAdu DN.
                    tMax = (self.config.maxLookupTableAdu -
                            polyFit[0]) / polyFit[1]
                    timeRange = np.linspace(0, tMax,
                                            self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1] * timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has correction

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    linearityFit = [tableIndex, 0]
                    tableIndex += 1
            elif self.config.linearityType in ['Spline']:
                # See discussion in `lsst.ip.isr.linearize.py` before modifying.
                numPerBin, binEdges = np.histogram(linearOrdinate,
                                                   bins=fitOrder)
                with np.errstate(invalid="ignore"):
                    # Algorithm note: With the counts of points per
                    # bin above, the next histogram calculates the
                    # values to put in each bin by weighting each
                    # point by the correction value.
                    values = np.histogram(
                        linearOrdinate,
                        bins=fitOrder,
                        weights=(inputOrdinate[fluxMask] -
                                 linearOrdinate))[0] / numPerBin

                    # After this is done, the binCenters are
                    # calculated by weighting by the value we're
                    # binning over.  This ensures that widely
                    # spaced/poorly sampled data aren't assigned to
                    # the midpoint of the bin (as could be done using
                    # the binEdges above), but to the weighted mean of
                    # the inputs.  Note that both histograms are
                    # scaled by the count per bin to normalize what
                    # the histogram returns (a sum of the points
                    # inside) into an average.
                    binCenters = np.histogram(
                        linearOrdinate, bins=fitOrder,
                        weights=linearOrdinate)[0] / numPerBin
                    values = values[numPerBin > 0]
                    binCenters = binCenters[numPerBin > 0]

                self.debugFit('splineFit', binCenters, np.abs(values), values,
                              None, ampName)
                interp = afwMath.makeInterpolate(
                    binCenters.tolist(), values.tolist(),
                    afwMath.stringToInterpStyle("AKIMA_SPLINE"))
                modelOrdinate = linearOrdinate + interp.interpolate(
                    linearOrdinate)
                self.debugFit('splineFit', linearOrdinate, fitOrdinate,
                              modelOrdinate, None, ampName)

                # If we exclude a lot of points, we may end up with
                # less than fitOrder points.  Pad out the low-flux end
                # to ensure equal lengths.
                if len(binCenters) != fitOrder:
                    padN = fitOrder - len(binCenters)
                    binCenters = np.pad(binCenters, (padN, 0),
                                        'linear_ramp',
                                        end_values=(binCenters.min() - 1.0, ))
                    # This stores the correction, which is zero at low values.
                    values = np.pad(values, (padN, 0))

                # Pack the spline into a single array.
                linearityFit = np.concatenate(
                    (binCenters.tolist(), values.tolist())).tolist()
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
            else:
                polyFit = [0.0]
                polyFitErr = [0.0]
                chiSq = np.nan
                linearityFit = [0.0]

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = np.array(linearityFit)
            linearizer.linearityBBox[ampName] = amp.getBBox()
            linearizer.fitParams[ampName] = np.array(polyFit)
            linearizer.fitParamsErr[ampName] = np.array(polyFitErr)
            linearizer.fitChiSq[ampName] = chiSq

            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.getArray()[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(
                linearizer.linearityType[ampName])
            linearizeFunction()(image, **{
                'coeffs': linearizer.linearityCoeffs[ampName],
                'table': linearizer.tableData,
                'log': linearizer.log
            })
            linearizeModel = image.getArray()[0, :]

            self.debugFit('solution', inputOrdinate[fluxMask], linearOrdinate,
                          linearizeModel[fluxMask], None, ampName)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera,
                                  detector=detector,
                                  filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )
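
To make the Notes in the docstring above concrete: a standalone sketch (independent of the Task, with made-up fit coefficients) of converting a direct polynomial fit k_j into the correction coefficients c_(j-2) = -k_j/k_1^j and applying corrImage = uncorrImage + sum_i c_i uncorrImage^(2 + i).

import numpy as np

# Hypothetical direct fit meanVector ~ k0 + k1*t + k2*t^2 + k3*t^3 (values made up).
polyFit = np.array([12.0, 1500.0, -3.0e-2, 4.0e-7])

# c_(j-2) = -k_j / k_1^j, mirroring the 'Polynomial' branch above.
k1 = polyFit[1]
linearityFit = np.array([-coeff / (k1**order) for order, coeff in enumerate(polyFit)])

# The j = 0, 1 entries are degenerate with bias and gain, so only the
# higher-order terms enter the correction.
uncorrected = np.linspace(0.0, 6.0e4, 5)
corrected = uncorrected + sum(c * uncorrected**(2 + i)
                              for i, c in enumerate(linearityFit[2:]))
print(corrected - uncorrected)  # the applied non-linearity correction in DN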
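
Similarly, the 'Spline' branch above builds its knot points with a weighted-histogram trick; a compact NumPy-only illustration of that averaging, using synthetic stand-ins for linearOrdinate and the residuals.

import numpy as np

rng = np.random.default_rng(12345)
linearOrdinate = np.sort(rng.uniform(0.0, 6.0e4, 200))             # synthetic linear flux
residual = 1.0e-6 * linearOrdinate**2 + rng.normal(0.0, 5.0, 200)  # synthetic non-linearity

fitOrder = 10  # number of bins, i.e. spline knots
numPerBin, binEdges = np.histogram(linearOrdinate, bins=fitOrder)
with np.errstate(invalid="ignore"):
    # Summing the residuals per bin and dividing by the count turns the
    # histogram sum into a per-bin average correction value.
    values = np.histogram(linearOrdinate, bins=fitOrder,
                          weights=residual)[0] / numPerBin
    # Weighting by the flux itself gives the data-weighted mean flux per
    # bin: a bin "center" that tracks where the points actually lie.
    binCenters = np.histogram(linearOrdinate, bins=fitOrder,
                              weights=linearOrdinate)[0] / numPerBin
values = values[numPerBin > 0]
binCenters = binCenters[numPerBin > 0]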