def transferAttribute(self, adinputs=None, source=None, attribute=None):
        """
        This primitive takes an attribute (e.g., "mask", or "OBJCAT") from
        the AD(s) in another ("source") stream and applies it to the ADs in
        this stream. There must be either the same number of ADs in each
        stream, or only 1 in the source stream.
        
        Parameters
        ----------
        source: str
            name of stream containing ADs whose attributes you want
        attribute: str
            attribute to transfer from ADs in other stream
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        if source not in self.streams:
            log.info("Stream {} does not exist, so there is nothing to "
                     "transfer".format(source))
            return adinputs

        source_length = len(self.streams[source])
        if not (source_length == 1 or source_length == len(adinputs)):
            log.warning("Incompatible stream lengths: {} and {}".
                        format(len(adinputs), source_length))
            return adinputs

        log.stdinfo("Transferring attribute {} from stream {}".format(attribute, source))

        # Keep track of whether we find anything to transfer, as failing to
        # do so might indicate a problem and we should warn the user
        found = False

        for ad1, ad2 in zip(*gt.make_lists(adinputs, self.streams[source])):
            # Attribute could be top-level or extension-level
            # Use deepcopy so references to original object don't remain
            if hasattr(ad2, attribute):

                try:
                    setattr(ad1, attribute,
                            copy.deepcopy(getattr(ad2, attribute)))

                except ValueError:  # e.g. data and mask are gettable, not settable
                    pass

                else:
                    found = True
                    continue

            for ext1, ext2 in zip(ad1, ad2):

                if hasattr(ext2, attribute):
                    setattr(ext1, attribute,
                            copy.deepcopy(getattr(ext2, attribute)))
                    found = True

        if not found:
            log.warning("Did not find any {} attributes to transfer".format(attribute))

        return adinputs
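# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the copy pattern
# used by transferAttribute(), reduced to plain Python objects. _Frame and
# _Ext are hypothetical stand-ins for AstroData objects; the point is the
# top-level-first, per-extension-fallback transfer and the use of deepcopy
# so no references to the source object remain.
import copy

class _Ext:
    def __init__(self, mask=None):
        self.mask = mask

class _Frame:
    def __init__(self, exts):
        self._exts = exts
    def __iter__(self):
        return iter(self._exts)

src = _Frame([_Ext(mask=[1, 0, 1]), _Ext(mask=[0, 0, 0])])
dst = _Frame([_Ext(), _Ext()])

attribute = "mask"
if hasattr(src, attribute):      # top-level attribute takes precedence
    setattr(dst, attribute, copy.deepcopy(getattr(src, attribute)))
else:                            # otherwise fall back to extension level
    for e1, e2 in zip(dst, src):
        if hasattr(e2, attribute):
            setattr(e1, attribute, copy.deepcopy(getattr(e2, attribute)))

assert dst._exts[0].mask == [1, 0, 1]
assert dst._exts[0].mask is not src._exts[0].mask  # deepcopy: no shared refs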
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        if illum_mask is None:
            illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            if illum is None:
                # So it can be zipped with the AD
                final_illum = [None] * len(ad)
            else:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask = iext if ext.mask is None else ext.mask | iext

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
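# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): folding an
# illumination mask into an existing DQ plane, as in the loop above. In
# DRAGONS, DQ.unilluminated is the bit value 64 and DQ.datatype is uint16;
# those values are assumed here rather than imported.
import numpy as np

UNILLUMINATED = np.uint16(64)  # assumed value of DQ.unilluminated

illum = np.array([[0, 1], [3, 0]])  # any positive value means "not illuminated"
dq = np.array([[1, 0], [0, 4]], dtype=np.uint16)  # existing DQ plane

# Keep only the unilluminated bit, then OR it into the existing mask
iext = np.where(illum > 0, UNILLUMINATED, 0).astype(np.uint16)
dq = iext if dq is None else dq | iext
print(dq)  # [[ 1 64] [64  4]]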
    def standardizeStructure(self, adinputs=None, **params):
        """
        This primitive is used to standardize the structure of F2 data,
        specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        attach_mdf: bool
            attach an MDF to the AD objects? (ignored if not tagged as SPECT)
        mdf: str
            full path of the MDF to attach
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        adoutputs = []
        for ad, mdf in zip(*gt.make_lists(adinputs, params['mdf'])):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardizeStructure".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            # Attach an MDF to each input AstroData object
            if params["attach_mdf"] and 'SPECT' in ad.tags:
                ad = self.addMDF([ad], mdf=mdf)[0]

            # Raw FLAMINGOS-2 pixel data have three dimensions (2048x2048x1).
            # Remove the single length dimension from the pixel data.
            # CD3_3 keyword must also be removed or alignAndStack complains.
            ad = remove_single_length_dimension(ad)

            # Need to change dtype from int32 to float32, or else numpy will
            # promote to float64. There's no VAR or DQ at this stage.
            ad[0].data = ad[0].data.astype(np.float32)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            adoutputs.append(ad)

        return adoutputs
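# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the two array
# manipulations standardizeStructure() applies to raw F2 data, shown on a
# bare ndarray rather than an AstroData object.
import numpy as np

raw = np.zeros((2048, 2048, 1), dtype=np.int32)  # raw F2 pixel data

# Drop the degenerate third axis: (2048, 2048, 1) -> (2048, 2048)
data = np.squeeze(raw, axis=2)

# Cast to float32 now; arithmetic mixing int32 and float32 is promoted to
# float64 by numpy, which would double the memory footprint later on.
print((np.int32(1) + np.float32(1.0)).dtype)  # float64 -- why the cast matters
data = data.astype(np.float32)
print(data.shape, data.dtype)                 # (2048, 2048) float32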
    def standardizeStructure(self, adinputs=None, **params):
        """
        This primitive is used to standardize the structure of GMOS data,
        specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        attach_mdf: bool
            attach an MDF to the AD objects? (ignored if not tagged as SPECT)
        mdf: str
            full path of the MDF to attach
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        adoutputs = []
        # If attach_mdf=False, this just zips up the ADs with a list of Nones,
        # which has no side-effects.
        for ad, mdf in zip(*gt.make_lists(adinputs, params['mdf'])):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardizeStructure".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            # Attach an MDF to each input AstroData object
            if params["attach_mdf"] and 'SPECT' in ad.tags:
                ad = self.addMDF([ad], mdf=mdf)[0]

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            adoutputs.append(ad)
        return adoutputs
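# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the broadcasting
# convention that gt.make_lists() provides throughout these primitives, as
# noted in the comment above about zipping ADs with a list of Nones. The
# helper below is a hypothetical, simplified stand-in; the real function
# also opens filenames as AstroData objects.
def _make_lists(sci, aux):
    if not isinstance(aux, (list, tuple)):
        aux = [aux]
    if len(aux) == 1:
        aux = list(aux) * len(sci)   # broadcast a single aux item (or None)
    if len(aux) != len(sci):
        raise ValueError("Incompatible list lengths")
    return sci, aux

sci = ["sci1.fits", "sci2.fits", "sci3.fits"]
print(_make_lists(sci, None)[1])         # [None, None, None]
print(_make_lists(sci, "mask.fits")[1])  # one aux file broadcast to all three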
    def addDQ(self, adinputs=None, **params):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension is the sum of all
        the applicable flags: 0=good, 1=bad pixel (found in bad pixel mask),
        2=pixel is in the non-linear regime, 4=pixel is saturated. This
        primitive will trim the BPM to match the input AstroData object(s).

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        static_bpm: str
            Name of bad pixel mask ("default" -> use default from look-up table)
            If set to None, no static_bpm will be added.
        user_bpm: str
            Name of the bad pixel mask created by the user from flats and
            darks.  It is an optional BPM that can be added to the static one.
        illum_mask: bool
            add illumination mask?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["addDQ"]
        sfx = params["suffix"]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        static_bpm_list = params['static_bpm']
        user_bpm_list = params['user_bpm']

        if static_bpm_list == "default":
            static_bpm_list = [self._get_bpm_filename(ad) for ad in adinputs]

        for ad, static, user in zip(*gt.make_lists(
                adinputs, static_bpm_list, user_bpm_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addDQ'.format(
                                ad.filename))
                continue

            if static is None:
                # So it can be zipped with the AD
                final_static = [None] * len(ad)
            else:
                log.fullinfo("Using {} as static BPM".format(static.filename))
                final_static = gt.clip_auxiliary_data(ad,
                                                      aux=static,
                                                      aux_type='bpm',
                                                      return_dtype=DQ.datatype)

            if user is None:
                final_user = [None] * len(ad)
            else:
                log.fullinfo("Using {} as user BPM".format(user.filename))
                final_user = gt.clip_auxiliary_data(ad,
                                                    aux=user,
                                                    aux_type='bpm',
                                                    return_dtype=DQ.datatype)

            for ext, static_ext, user_ext in zip(ad, final_static, final_user):
                extver = ext.hdr['EXTVER']
                if ext.mask is not None:
                    log.warning(
                        'A mask already exists in extver {}'.format(extver))
                    continue

                non_linear_level = ext.non_linear_level()
                saturation_level = ext.saturation_level()

                # Need to create the array first for 3D raw F2 data, with 2D BPM
                ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)
                if static_ext is not None:
                    ext.mask |= static_ext.data
                if user_ext is not None:
                    ext.mask |= user_ext.data

                if saturation_level:
                    log.fullinfo('Flagging saturated pixels in {}:{} '
                                 'above level {:.2f}'.format(
                                     ad.filename, extver, saturation_level))
                    ext.mask |= np.where(ext.data >= saturation_level,
                                         DQ.saturated, 0).astype(DQ.datatype)

                if non_linear_level:
                    if saturation_level:
                        if saturation_level > non_linear_level:
                            log.fullinfo('Flagging non-linear pixels in {}:{} '
                                         'above level {:.2f}'.format(
                                             ad.filename, extver,
                                             non_linear_level))
                            ext.mask |= np.where(
                                (ext.data >= non_linear_level) &
                                (ext.data < saturation_level), DQ.non_linear,
                                0).astype(DQ.datatype)
                            # Readout modes of IR detectors can result in
                            # saturated pixels having values below the
                            # saturation level. Flag those. Assume we have an
                            # IR detector here because both non-linear and
                            # saturation levels are defined and nonlin<sat
                            regions, nregions = measurements.label(
                                ext.data < non_linear_level)
                            # In all my tests, region 1 has been the majority
                            # of the image; however, I cannot guarantee that
                            # this is always the case and therefore we should
                            # check the size of each region
                            region_sizes = measurements.labeled_comprehension(
                                ext.data, regions, np.arange(1, nregions + 1),
                                len, int, 0)
                            # First, assume all regions are saturated, and
                            # remove any very large ones. This is much faster
                            # than progressively adding each region to DQ
                            hidden_saturation_array = np.where(
                                regions > 0, 4, 0).astype(DQ.datatype)
                            for region in range(1, nregions + 1):
                                # Limit of 10000 pixels for a hole is a bit arbitrary
                                if region_sizes[region - 1] > 10000:
                                    hidden_saturation_array[regions ==
                                                            region] = 0
                            ext.mask |= hidden_saturation_array

                        elif saturation_level < non_linear_level:
                            log.warning('{}:{} has saturation level less than '
                                        'non-linear level'.format(
                                            ad.filename, extver))
                        else:
                            log.fullinfo('Saturation and non-linear levels '
                                         'are the same for {}:{}. Only '
                                         'flagging saturated pixels'.format(
                                             ad.filename, extver))
                    else:
                        log.fullinfo('Flagging non-linear pixels in {}:{} '
                                     'above level {:.2f}'.format(
                                         ad.filename, extver,
                                         non_linear_level))
                        ext.mask |= np.where(ext.data >= non_linear_level,
                                             DQ.non_linear,
                                             0).astype(DQ.datatype)

        # Handle latency if requested
        if params.get("latency", False):
            try:
                adinputs = self.addLatencyToDQ(adinputs,
                                               time=params["time"],
                                               non_linear=params["non_linear"])
            except AttributeError:
                log.warning("addLatencyToDQ() not defined in primitives class "
                            + self.__class__.__name__)

        # Add the illumination mask if requested
        if params['add_illum_mask']:
            adinputs = self.addIllumMaskToDQ(adinputs,
                                             illum_mask=params["illum_mask"])

        # Timestamp and update filenames
        for ad in adinputs:
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
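# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): building a DQ plane
# from the bit values listed in the addDQ() docstring (1=bad pixel,
# 2=non-linear, 4=saturated), on a bare ndarray. The levels are made up.
import numpy as np

data = np.array([100., 30000., 52000., 70000.])
static_bpm = np.array([1, 0, 0, 0], dtype=np.uint16)
non_linear_level, saturation_level = 45000., 60000.

dq = np.zeros_like(data, dtype=np.uint16)
dq |= static_bpm                                  # bit 1: known bad pixels
dq |= np.where(data >= saturation_level, 4, 0).astype(np.uint16)
dq |= np.where((data >= non_linear_level) &
               (data < saturation_level), 2, 0).astype(np.uint16)
print(dq)  # [1 0 2 4] -- each pixel carries the sum of its applicable bits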
    def QECorrect(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        arc : {None, AstroData, str}
            Arc(s) with distortion map.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]
        use_iraf = params["use_iraf"]
        do_cal = params["do_cal"]

        if do_cal == 'skip':
            log.warning("QE correction has been turned off.")
            return adinputs

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            arc_list = self.caldb.get_processed_arc(adinputs)
        else:
            arc_list = (arc, None)

        # Provide an arc AD object for every science frame, and an origin
        for ad, arc, origin in zip(
                *gt.make_lists(adinputs, *arc_list, force_ad=(1, ))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by QECorrect. "
                            "Continuing.")
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.stdinfo(f"{ad.filename} has the e2v CCDs, so no QE "
                            "correction is necessary")
                continue

            if self.timestamp_keys['mosaicDetectors'] in ad.phu:
                log.warning(f"{ad.filename} has been processed by mosaic"
                            "Detectors so QECorrect cannot be run")
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin
                                    or arc.detector_y_bin() != ybin):
                log.warning("Science frame and arc have different binnings.")
                arc = None

            # The plan here is to attach the mosaic gWCS to the science frame,
            # apply an origin shift to put it in the frame of the arc, and
            # then use the arc's WCS to get the wavelength. If there's no arc,
            # we just use the science frame's WCS.
            # Since we're going to change that WCS, store it for restoration.
            original_wcs = [ext.wcs for ext in ad]
            try:
                transform.add_mosaic_wcs(ad, geotable)
            except ValueError:
                log.warning(f"{ad.filename} already has a 'mosaic' coordinate"
                            "frame. This is unexpected but I'll continue.")

            if arc is None:
                if 'sq' in self.mode or do_cal == 'force':
                    raise OSError(f"No processed arc listed for {ad.filename}")
                else:
                    log.warning(f"{ad.filename}: no arc was specified. Using "
                                "wavelength solution in science frame.")
            else:
                # OK, we definitely want to try to do this, get a wavelength solution
                origin_str = f" (obtained from {origin})" if origin else ""
                log.stdinfo(f"{ad.filename}: using the arc {arc.filename}"
                            f"{origin_str}")
                if self.timestamp_keys[
                        'determineWavelengthSolution'] not in arc.phu:
                    msg = f"Arc {arc.filename} (for {ad.filename} has not been wavelength calibrated."
                    if 'sq' in self.mode or do_cal == 'force':
                        raise OSError(msg)
                    else:
                        log.warning(msg)

                # We'll be modifying this
                arc_wcs = deepcopy(arc[0].wcs)
                if 'distortion_corrected' not in arc_wcs.available_frames:
                    msg = f"Arc {arc.filename} (for {ad.filename}) has no distortion model."
                    if 'sq' in self.mode or do_cal == 'force':
                        raise OSError(msg)
                    else:
                        log.warning(msg)

                # NB. At this point, we could have an arc that has no good
                # wavelength solution nor distortion correction. But we will
                # use its WCS rather than the science frame's because it must
                # have been supplied by the user.

                # This is GMOS so no need to be as generic as distortionCorrect
                ad_detsec = ad.detector_section()
                arc_detsec = arc.detector_section()[0]
                if (ad_detsec[0].x1, ad_detsec[-1].x2) != (arc_detsec.x1,
                                                           arc_detsec.x2):
                    raise ValueError("Cannot process the offsets between "
                                     f"{ad.filename} and {arc.filename}")

                yoff1 = arc_detsec.y1 - ad_detsec[0].y1
                yoff2 = arc_detsec.y2 - ad_detsec[0].y2
                arc_ext_shapes = [(ext.shape[0] - yoff1 + yoff2, ext.shape[1])
                                  for ext in ad]
                arc_corners = np.concatenate(
                    [transform.get_output_corners(
                        ext.wcs.get_transform(ext.wcs.input_frame, 'mosaic'),
                        input_shape=arc_shape, origin=(yoff1, 0))
                     for ext, arc_shape in zip(ad, arc_ext_shapes)], axis=1)
                arc_origin = tuple(
                    np.ceil(min(corners)) for corners in arc_corners)

                # So this is what was applied to the ARC to get the
                # mosaic frame to its pixel frame, in which the distortion
                # correction model was calculated. Convert coordinates
                # from python order to Model order.
                origin_shift = reduce(
                    Model.__and__,
                    [models.Shift(-origin) for origin in arc_origin[::-1]])
                arc_wcs.insert_transform(arc_wcs.input_frame,
                                         origin_shift,
                                         after=True)

            array_info = gt.array_information(ad)
            if array_info.detector_shape == (1, 3):
                ccd2_indices = array_info.extensions[1]
            else:
                raise ValueError(
                    f"{ad.filename} does not have 3 separate detectors")

            for index, ext in enumerate(ad):
                if index in ccd2_indices:
                    continue

                # Use the WCS in the extension if we don't have an arc,
                # otherwise use the arc's mosaic->world transformation
                if arc is None:
                    trans = ext.wcs.forward_transform
                else:
                    trans = (ext.wcs.get_transform(ext.wcs.input_frame,
                                                   'mosaic')
                             | arc_wcs.forward_transform)

                ygrid, xgrid = np.indices(ext.shape)
                # TODO: want with_units
                waves = trans(xgrid, ygrid)[0] * u.nm  # Wavelength always axis 0

                # Tapering required to prevent QE correction from blowing up
                # at the extremes (remember, this is a ratio, not the actual QE)
                # We use half-Gaussians to taper
                taper = np.ones_like(ext.data)
                taper_locut, taper_losig = 350 * u.nm, 25 * u.nm
                taper_hicut, taper_hisig = 1200 * u.nm, 200 * u.nm
                taper[waves < taper_locut] = np.exp(-(
                    (waves[waves < taper_locut] - taper_locut) /
                    taper_losig)**2)
                taper[waves > taper_hicut] = np.exp(-(
                    (waves[waves > taper_hicut] - taper_hicut) /
                    taper_hisig)**2)
                try:
                    qe_func = qeModel(ext, use_iraf=use_iraf)
                    qe_correction = (qe_func(
                        (waves / u.nm).to(u.dimensionless_unscaled).value
                    ).astype(np.float32) - 1) * taper + 1
                except TypeError:  # qeModel() returns None
                    msg = f"No QE correction found for {ad.filename} extension {ext.id}"
                    if 'sq' in self.mode:
                        raise ValueError(msg)
                    else:
                        log.warning(msg)
                        continue
                log.stdinfo(f"Mean relative QE of extension {ext.id} is "
                            f"{qe_correction.mean():.5f}")
                if not is_flat:
                    qe_correction = 1. / qe_correction
                ext.multiply(qe_correction)

            for ext, orig_wcs in zip(ad, original_wcs):
                ext.wcs = orig_wcs

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
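# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the half-Gaussian
# tapering QECorrect() uses to stop the QE ratio blowing up outside roughly
# 350-1200 nm. Cutoffs and widths are copied from the primitive above.
import numpy as np

waves = np.linspace(300., 1400., 12)  # wavelengths in nm
lo_cut, lo_sig = 350., 25.
hi_cut, hi_sig = 1200., 200.

taper = np.ones_like(waves)
taper[waves < lo_cut] = np.exp(-((waves[waves < lo_cut] - lo_cut) / lo_sig)**2)
taper[waves > hi_cut] = np.exp(-((waves[waves > hi_cut] - hi_cut) / hi_sig)**2)

# A raw correction of 1.5 is pulled back towards 1 wherever taper < 1,
# exactly as in: qe_correction = (raw - 1) * taper + 1
raw_correction = np.full_like(waves, 1.5)
qe_correction = (raw_correction - 1.) * taper + 1.
print(np.round(qe_correction, 3))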
    def slitIllumCorrect(self,
                         adinputs=None,
                         slit_illum=None,
                         do_illum=True,
                         suffix="_illumCorrected"):
        """
        This primitive will divide each SCI extension of the inputs by those
        of the corresponding slit illumination image. If the inputs contain
        VAR or DQ frames, those will also be updated accordingly due to the
        division on the data.

        Parameters
        ----------
        adinputs : list of AstroData
            Data to be corrected.
        slit_illum : str or AstroData
            Slit illumination path or AstroData object.
        do_illum: bool, optional
            Perform slit illumination correction? (Default: True)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        qecorr_key = self.timestamp_keys['QECorrect']

        if not do_illum:
            log.warning("Slit Illumination correction has been turned off.")
            return adinputs

        if slit_illum is None:
            raise NotImplementedError
        else:
            slit_illum_list = slit_illum

        # Provide a Slit Illum Ad object for every science frame
        ad_outputs = []
        for ad, slit_illum_ad in zip(
                *gt.make_lists(adinputs, slit_illum_list, force_ad=True)):

            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by flatCorrect".format(
                                ad.filename))
                continue

            if slit_illum_ad is None:
                if self.mode in ['sq']:
                    raise OSError(
                        "No processed slit illumination listed for {}".format(
                            ad.filename))
                else:
                    log.warning("No changes will be made to {}, since no slit "
                                "illumination has been specified".format(
                                    ad.filename))
                    continue

            gt.check_inputs_match(ad, slit_illum_ad, check_shape=False)

            if not all(e1.shape == e2.shape
                       for (e1, e2) in zip(ad, slit_illum_ad)):
                slit_illum_ad = gt.clip_auxiliary_data(adinput=[ad],
                                                       aux=[slit_illum_ad])[0]

            log.info("Dividing the input AstroData object {} by this \n"
                     "slit illumination file:  \n{}".format(
                         ad.filename, slit_illum_ad.filename))

            ad_out = deepcopy(ad)
            ad_out.divide(slit_illum_ad)

            # Update the header and filename, copying QECORR keyword from flat
            ad_out.phu.set("SLTILLIM", slit_illum_ad.filename,
                           self.keyword_comments["SLTILLIM"])

            try:
                qecorr_value = slit_illum_ad.phu[qecorr_key]
            except KeyError:
                pass
            else:
                log.fullinfo(
                    "Copying {} keyword from slit illumination".format(
                        qecorr_key))
                ad_out.phu.set(qecorr_key, qecorr_value,
                               slit_illum_ad.phu.comments[qecorr_key])

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)

            if slit_illum_ad.path:
                add_provenance(ad_out, slit_illum_ad.filename,
                               md5sum(slit_illum_ad.path) or "", self.myself())

            ad_outputs.append(ad_out)

        return ad_outputs
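# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the first-order
# variance propagation that dividing two frames with VAR planes corresponds
# to, var(a/b) ~ (a/b)^2 * (var_a/a^2 + var_b/b^2). Whether ad.divide()
# uses exactly this form should be checked against the astrodata docs.
import numpy as np

a, var_a = np.array([10., 20.]), np.array([1., 4.])
b, var_b = np.array([2., 4.]), np.array([0.1, 0.2])

q = a / b
var_q = q**2 * (var_a / a**2 + var_b / b**2)
print(q, var_q)  # [5. 5.] alongside the propagated variances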
    def fringeCorrect(self, adinputs=None, **params):
        """
        Correct science frames for the effects of fringing, using a fringe
        frame. The fringe frame is obtained either from a specified parameter,
        or the "fringe" stream, or the calibration database. This is basically
        a bookkeeping wrapper for subtractFringe(), which does all the work.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        fringe: list/str/AstroData/None
            fringe frame(s) to subtract
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Exit now if nothing needs a correction, to avoid an error when the
        # calibration search fails. If images with different exposure times
        # are used, some frames may not require a correction (but the calibration
        # search will succeed), so still need to check individual inputs later.
        if not any(self._needs_fringe_correction(ad) for ad in adinputs):
            log.stdinfo("No input images require a fringe correction.")
            return adinputs

        fringe = params["fringe"]
        scale = params["scale"]
        if fringe is None:
            try:
                fringe_list = self.streams['fringe']
                assert len(fringe_list) == 1
                scale = False
                log.stdinfo("Using fringe frame in 'fringe' stream. "
                            "Setting scale=False")
            except (KeyError, AssertionError):
                self.getProcessedFringe(adinputs)
                fringe_list = self._get_cal(adinputs, "processed_fringe")
        else:
            fringe_list = fringe

        # Usual stuff to ensure that we have an iterable of the correct length
        # for the scale factors regardless of what the input is
        scale_factor = params["scale_factor"]
        try:
            factors = iter(scale_factor)
        except TypeError:
            factors = iter([scale_factor] * len(adinputs))
        else:
            # In case a single-element list was passed
            if len(scale_factor) == 1:
                factors = iter(scale_factor * len(adinputs))

        # Get a fringe AD object for every science frame
        for ad, fringe in zip(*gt.make_lists(adinputs, fringe_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by subtractFringe".
                            format(ad.filename))
                continue

            # Check the inputs have matching filters, binning, and shapes
            try:
                gt.check_inputs_match(ad, fringe)
            except ValueError:
                fringe = gt.clip_auxiliary_data(adinput=ad, aux=fringe,
                                                aux_type="cal")
                gt.check_inputs_match(ad, fringe)

            if scale:
                factor = next(factors)
                if factor is None:
                    factor = self._calculate_fringe_scaling(ad, fringe)
                log.stdinfo("Scaling fringe frame by factor {:.3f} before "
                            "subtracting from {}".format(factor, ad.filename))
                # Since all elements of fringe_list might be references to the
                # same AD, need to make a copy before multiplying
                fringe_copy = deepcopy(fringe)
                fringe_copy.multiply(factor)
                ad.subtract(fringe_copy)
            else:
                ad.subtract(fringe)

            # Timestamp and update header and filename
            ad.phu.set("FRINGEIM", fringe.filename, self.keyword_comments["FRINGEIM"])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
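# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the scalar-or-
# sequence pattern fringeCorrect() uses to get one scale factor per input,
# whatever form the parameter takes (None, a number, or a list of length
# 1 or N).
def factor_iter(scale_factor, n):
    try:
        factors = iter(scale_factor)          # already a sequence?
    except TypeError:
        return iter([scale_factor] * n)       # scalar (or None): repeat it
    if len(scale_factor) == 1:
        return iter(list(scale_factor) * n)   # single-element list: broadcast
    return factors

print(list(factor_iter(None, 3)))        # [None, None, None]
print(list(factor_iter(1.2, 3)))         # [1.2, 1.2, 1.2]
print(list(factor_iter([0.9], 3)))       # [0.9, 0.9, 0.9]
print(list(factor_iter([1, 2, 3], 3)))   # [1, 2, 3]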
    def processSlits(self, adinputs=None, **params):
        """
        Compute and record the mean exposure epoch for a slit viewer image

        The 'slit viewer image' for each observation will almost certainly
        be a sequence of short exposures of the slit viewer camera,
        collected together for convenience. However, it cannot be guaranteed
        that slit viewer exposures will be taken throughout an entire
        science exposure; therefore, it is necessary to be able to compute
        the mean exposure epoch (i.e. the effective time that the combined
        slit viewer exposures were taken at). This allows a single science
        observation to be calibrated using multiple packets of slit viewer
        exposures, with appropriate weighting for the time delay between them.

        ``processSlits`` effectively computes a weighted average of the
        exposure epoch of all constituent slit viewer exposures, taking into
        account:

        - Length of each exposure;
        - Whether there is any overlap between the start/end of the
          exposure and the start/end of the overall 'image';
        - Time of each exposure, relative to the start of the 'image'.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        slitflat: str/None
            name of the slitflat to use (if None, use the calibration
            system)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        flat_list = params["flat"]
        if flat_list is None:
            self.getProcessedSlitFlat(adinputs)
            flat_list = [
                self._get_cal(ad, 'processed_slitflat') for ad in adinputs
            ]

        for ad, slitflat in zip(
                *gt.make_lists(adinputs, flat_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by processSlits".format(
                                ad.filename))
                continue

            if slitflat is None:
                log.warning("Unable to find slitflat calibration for {}; "
                            "skipping".format(ad.filename))
                continue
            else:
                sv_flat = slitflat[0].data

            # accumulators for computing the mean epoch
            sum_of_weights = 0.0
            accum_weighted_time = 0.0

            # Check the inputs have matching binning and SCI shapes.
            try:
                gt.check_inputs_match(adinput1=ad,
                                      adinput2=slitflat,
                                      check_filter=False)
            except ValueError:
                # This is most likely because the science frame has multiple
                # extensions and the slitflat needs to be copied
                slitflat = gt.clip_auxiliary_data(ad, slitflat, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, slitflat, check_filter=False)

            # get science start/end times
            sc_start = parse_timestr(ad.phu['UTSTART'])
            sc_end = parse_timestr(ad.phu['UTEND'])

            res = ad.res_mode()
            for ext in ad:
                sv_start = parse_timestr(ext.hdr['EXPUTST'])
                sv_end = parse_timestr(ext.hdr['EXPUTEND'])

                # compute overlap percentage and slit view image duration
                latest_start = max(sc_start, sv_start)
                earliest_end = min(sc_end, sv_end)
                # total_seconds() can go negative, unlike .seconds, so the
                # no-overlap guard on the next line can actually fire
                overlap = (earliest_end - latest_start).total_seconds()
                overlap = 0.0 if overlap < 0.0 else overlap  # no overlap edge case
                sv_duration = (sv_end - sv_start).total_seconds()
                overlap /= sv_duration  # convert to an overlap fraction (0-1)

                # compute the offset (the value to be weighted), in seconds,
                # from the start of the science exposure
                offset = 42.0  # placeholder; overwritten if there is overlap,
                               # and zero-weighted (weight=0) otherwise
                if sc_start <= sv_start and sv_end <= sc_end:
                    offset = (sv_start - sc_start).seconds + sv_duration / 2.0
                elif sv_start < sc_start:
                    offset = overlap * sv_duration / 2.0
                elif sv_end > sc_end:
                    offset = overlap * sv_duration / 2.0
                    offset += (sv_start - sc_start).seconds

                # add flux-weighted offset (plus weight itself) to accumulators
                flux = _total_obj_flux(res, ext.data, sv_flat)
                weight = flux * overlap
                sum_of_weights += weight
                accum_weighted_time += weight * offset

            # final mean exposure epoch computation
            if sum_of_weights > 0.0:
                mean_offset = accum_weighted_time / sum_of_weights
                mean_offset = timedelta(seconds=mean_offset)
                # write the mean exposure epoch into the PHU
                sc_start = parse_timestr(ad.phu['UTSTART'])
                mean_epoch = sc_start + mean_offset
                ad.phu['AVGEPOCH'] = (  # hope this keyword string is ok
                    mean_epoch.strftime("%H:%M:%S.%f")[:-3],
                    'Mean Exposure Epoch')

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
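# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the weighted
# mean-epoch arithmetic from processSlits(), for slit-viewer exposures fully
# contained in the science exposure and a constant flux of 1. Times are
# seconds from the science exposure start.
from datetime import datetime, timedelta

sc_start = datetime(2020, 1, 1, 0, 0, 0)
# (start, end) of each slit-viewer exposure, fully inside the science one
sv_windows = [(10., 40.), (60., 90.), (110., 140.)]

sum_of_weights = accum_weighted_time = 0.0
for t0, t1 in sv_windows:
    duration = t1 - t0
    overlap = 1.0                    # fully contained -> overlap fraction 1
    offset = t0 + duration / 2.0     # midpoint of the exposure
    weight = 1.0 * overlap           # flux * overlap; flux taken as 1 here
    sum_of_weights += weight
    accum_weighted_time += weight * offset

mean_epoch = sc_start + timedelta(seconds=accum_weighted_time / sum_of_weights)
print(mean_epoch.strftime("%H:%M:%S.%f")[:-3])  # 00:01:15.000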
    def addIllumMaskToDQ(self,
                         adinputs=None,
                         suffix=None,
                         illum_mask=None,
                         shift=None,
                         max_shift=20):
        """
        Adds an illumination mask to each AD object. This is only done for
        full-frame (not Central Spectrum) GMOS spectra, and is calculated by
        making a model illumination pattern from the attached MDF and cross-
        correlating it with the spatial profile of the data.

        Parameters
        ----------
        suffix : str
            suffix to be added to output files
        illum_mask : str/None
            name of illumination mask (None -> use default)
        shift : int/None
            user-defined shift to apply to illumination mask
        max_shift : int
            maximum shift (in unbinned pixels) allowable for the cross-
            correlation
        """
        offset_dict = {
            ("GMOS-N", "Hamamatsu-N"): 1.5,
            ("GMOS-N", "e2vDD"): -0.2,
            ("GMOS-N", "EEV"): 0.7,
            ("GMOS-S", "Hamamatsu-S"): 5.5,
            ("GMOS-S", "EEV"): 3.8
        }
        edges = 50  # try to eliminate issues at the very edges

        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Do this now for memory management reasons. We'll be creating large
        # arrays temporarily and don't want the permanent mask arrays to
        # fragment the free memory.
        for ad in adinputs:
            for ext in ad:
                if ext.mask is None:
                    ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            ybin = ad.detector_y_bin()
            ad_detsec = ad.detector_section()
            no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                             for detsec in ad_detsec)
            has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec)
                          and 'Hamamatsu' in ad.detector_name(pretty=True))

            if illum:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

                for ext, illum_ext in zip(ad, final_illum):
                    if illum_ext is not None:
                        # Ensure we're only adding the unilluminated bit
                        iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                        0).astype(DQ.datatype)
                        ext.mask |= iext
            elif not no_bridges:  # i.e. there are bridges.
                try:
                    mdf = ad.MDF
                except AttributeError:
                    log.warning(f"MDF not found for {ad.filename} - cannot "
                                "add illumination mask.")
                    continue

                # Default operation for GMOS full-frame LS
                # Sadly, we cannot do this reliably without concatenating the
                # arrays and using a big chunk of memory.
                row_medians = np.percentile(
                    np.concatenate([ext.data for ext in ad], axis=1),
                    95, axis=1)
                row_medians -= at.boxcar(row_medians, size=50 // ybin)

                # Construct a model of the slit illumination from the MDF
                # coefficients are from G-IRAF except c0, approx. from data
                model = np.zeros_like(row_medians, dtype=int)
                for ypos, ysize in mdf['slitpos_my', 'slitsize_my']:
                    y = ypos + np.array([-0.5, 0.5]) * ysize
                    c0 = offset_dict[ad.instrument(),
                                     ad.detector_name(pretty=True)]
                    if ad.instrument() == "GMOS-S":
                        c1, c2, c3 = (0.99911, -1.7465e-5, 3.0494e-7)
                    else:
                        c1, c2, c3 = (0.99591859227, 5.3042211333437e-8,
                                      1.7447902551997e-7)
                    yccd = ((c0 + y *
                             (c1 + y *
                              (c2 + y * c3))) * 1.611444 / ad.pixel_scale() +
                            0.5 * model.size).astype(int)
                    model[yccd[0]:yccd[1] + 1] = 1
                    log.stdinfo("Expected slit location from pixels "
                                f"{yccd[0]+1} to {yccd[1]+1}")

                if shift is None:
                    max_shift = 50
                    mshift = max_shift // ybin + 2
                    mshift2 = mshift + edges
                    # model[] indexing avoids reduction in signal as slit
                    # is shifted off the top of the image
                    cntr = model.size - edges - mshift2 - 1
                    xcorr = correlate(row_medians[edges:-edges],
                                      model[mshift2:-mshift2],
                                      mode='full')[cntr - mshift:cntr + mshift]
                    # This line avoids numerical errors in the spline fit
                    xcorr -= np.median(xcorr)
                    # This calculates the offsets of each point from the
                    # straight line between its neighbours
                    std = (xcorr[1:-1] - 0.5 *
                           (xcorr + np.roll(xcorr, 2))[2:]).std()
                    xspline = fit_1D(xcorr,
                                     function="spline3",
                                     order=None,
                                     weights=np.full(len(xcorr),
                                                     1. / std)).evaluate()
                    yshift = xspline.argmax() - mshift
                    maxima = xspline[1:-1][np.logical_and(
                        np.diff(xspline[:-1]) > 0,
                        np.diff(xspline[1:]) < 0)]
                    significant_maxima = (maxima >
                                          xspline.max() - 3 * std).sum()
                    if significant_maxima > 1 or abs(
                            yshift // ybin) > max_shift:
                        log.warning(
                            f"{ad.filename}: cross-correlation peak is"
                            " untrustworthy so not adding illumination "
                            "mask. Please re-run with a specified shift.")
                        yshift = None
                else:
                    yshift = shift

                if yshift is not None:
                    log.stdinfo(
                        f"{ad.filename}: Shifting mask by {yshift} pixels")
                    row_mask = np.ones_like(model, dtype=int)
                    if yshift < 0:
                        row_mask[:yshift] = 1 - model[-yshift:]
                    elif yshift > 0:
                        row_mask[yshift:] = 1 - model[:-yshift]
                    else:
                        row_mask[:] = 1 - model
                    for ext in ad:
                        ext.mask |= (row_mask * DQ.unilluminated).astype(
                            DQ.datatype)[:, np.newaxis]

            if has_48rows:
                actual_rows = 48 // ybin
                for ext in ad:
                    ext.mask[:actual_rows] |= DQ.unilluminated

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
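# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): locating an
# illumination-model shift by cross-correlation, as addIllumMaskToDQ() does,
# reduced to a 1D top-hat with scipy. The real primitive also spline-smooths
# the correlation and checks for multiple significant peaks before trusting
# the answer.
import numpy as np
from scipy.signal import correlate

model = np.zeros(200)
model[80:120] = 1.0              # expected slit illumination
profile = np.roll(model, 7)      # observed profile, shifted by 7 pixels

xcorr = correlate(profile, model, mode='full')
shift = xcorr.argmax() - (len(model) - 1)
print(shift)                     # 7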
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_cal=None):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, the calibration database(s) will
        be queried.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_cal: str
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if do_cal == 'skip':
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            bias_list = self.caldb.get_processed_bias(adinputs)
        else:
            bias_list = (bias, None)

        # Provide a bias AD object for every science frame, and an origin
        for ad, bias, origin in zip(*gt.make_lists(adinputs, *bias_list,
                                    force_ad=(1,))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by "
                            "biasCorrect. Continuing.")
                continue

            if bias is None:
                if 'sq' not in self.mode and do_cal != 'force':
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    # A missing calibration is fatal in sq/force mode,
                    # matching the behaviour of QECorrect above
                    raise OSError(f"No processed bias listed for "
                                  f"{ad.filename}")

            try:
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)

            origin_str = f" (obtained from {origin})" if origin else ""
            log.stdinfo(f"{ad.filename}: subtracting the bias "
                         f"{bias.filename}{origin_str}")
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename, self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
            if bias.path:
                add_provenance(ad, bias.filename, md5sum(bias.path) or "", self.myself())

        return adinputs
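# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the check-then-clip
# fallback used by biasCorrect() and several other primitives, reduced to
# bare ndarrays. gt.clip_auxiliary_data() works from header sections; the
# centre-crop below is only a hypothetical stand-in for that behaviour.
import numpy as np

def clip_to_match(aux, shape):
    """Centre-crop aux to the requested shape (hypothetical stand-in)."""
    y0 = (aux.shape[0] - shape[0]) // 2
    x0 = (aux.shape[1] - shape[1]) // 2
    return aux[y0:y0 + shape[0], x0:x0 + shape[1]]

sci = np.full((4, 4), 100.)
bias = np.full((6, 6), 2.)     # e.g. the bias still carries overscan regions

if bias.shape != sci.shape:    # stands in for gt.check_inputs_match()
    bias = clip_to_match(bias, sci.shape)

print((sci - bias)[0, 0], bias.shape)  # 98.0 (4, 4)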
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, getProcessedBias will be called
        to ensure a bias exists for every adinput.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_bias: bool
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if not do_bias:
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            self.getProcessedBias(adinputs, refresh=False)
            bias_list = self._get_cal(adinputs, 'processed_bias')
        else:
            bias_list = bias

        # Provide a bias AD object for every science frame
        for ad, bias in zip(*gt.make_lists(adinputs, bias_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by biasCorrect".
                            format(ad.filename))
                continue

            if bias is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    raise OSError('No processed bias listed for {}'.
                                  format(ad.filename))

            try:
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)

            log.fullinfo('Subtracting this bias from {}:\n{}'.
                         format(ad.filename, bias.filename))
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename, self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
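
The docstring's note that the variance extension is updated follows from
standard error propagation: for a subtraction, the variances add. A hedged
numpy illustration (plain arrays, not the AstroData arithmetic itself):

    import numpy as np

    def subtract_with_variance(sci, sci_var, bias, bias_var):
        """Return (data, variance) for sci - bias with Gaussian propagation."""
        return sci - bias, sci_var + bias_var

    data, var = subtract_with_variance(
        np.full((2, 2), 100.0), np.full((2, 2), 4.0),
        np.full((2, 2), 10.0), np.full((2, 2), 1.0))
    # data == 90.0 everywhere; var == 5.0 everywhere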
Example #15
    def addMDF(self, adinputs=None, suffix=None, mdf=None):
        """
        This primitive is used to add a Mask Definition File (MDF) extension to
        the input AstroData object. This MDF extension consists of a FITS binary
        table with information about where the spectroscopy slits are in
        the focal plane mask. In IFU, it is the position of the fibers. In
        Multi-Object Spectroscopy, it is the position of the multiple slits.
        In longslit, it is the position of the single slit.

        If only one MDF is provided, that MDF will be added to all input AstroData
        object(s). If more than one MDF is provided, the number of MDF AstroData
        objects must match the number of input AstroData objects.

        If no MDF is provided, the primitive will attempt to determine an
        appropriate MDF.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        mdf: str/None
            name of MDF to add (None => use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        mdf_list = mdf or self.caldb.get_calibrations(adinputs,
                                                      caltype="mask").files

        for ad, mdf in zip(*gt.make_lists(adinputs, mdf_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addMDF'.format(
                                ad.filename))
                continue
            if hasattr(ad, 'MDF'):
                log.warning('An MDF extension already exists in {}, so no '
                            'MDF will be added'.format(ad.filename))
                continue

            if mdf is None:
                log.stdinfo('No MDF could be retrieved for {}'.format(
                    ad.filename))
                continue

            try:
                # This will raise some sort of exception unless the MDF file
                # has a single MDF Table extension
                ad.MDF = mdf.MDF
            except Exception:
                if len(mdf.tables) == 1:
                    ad.MDF = getattr(mdf, mdf.tables.pop())
                else:
                    log.warning('Cannot find MDF in {}, so no MDF will be '
                                'added'.format(mdf.filename))
                continue

            log.fullinfo('Attaching the MDF {} to {}'.format(
                mdf.filename, ad.filename))

            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
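
For context, an MDF is just a FITS binary table. A purely illustrative way to
build one with astropy (the column names here are hypothetical, not the Gemini
MDF schema):

    from astropy.table import Table

    mdf = Table({'slit_id': [1, 2],
                 'x_ccd': [1024.0, 1536.5],
                 'y_ccd': [2048.0, 2050.2]})
    mdf.write('mdf_example.fits', overwrite=True)  # writes a BINTABLE extension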
Example #16
    def fringeCorrect(self, adinputs=None, **params):
        """
        Correct science frames for the effects of fringing, using a fringe
        frame. The fringe frame is obtained either from a specified parameter,
        or the "fringe" stream, or the calibration database. This is basically
        a bookkeeping wrapper for subtractFringe(), which does all the work.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        fringe: list/str/AstroData/None
            fringe frame(s) to subtract
        do_cal: str
            perform fringe correction? ("procmode" => use pipeline default
            for the data; "force" and "skip" override it)
        scale: bool/None
            scale fringe frame? (None => False if fringe frame has the same
            group_id() as the data)
        scale_factor: float/sequence/None
            factor(s) by which to scale the fringe frame
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        fringe = params["fringe"]
        scale = params["scale"]
        do_cal = params["do_cal"]

        # Exit now if nothing needs a correction, to avoid an error when the
        # calibration search fails. If images with different exposure times
        # are used, some frames may not require a correction (but the calibration
        # search will succeed), so still need to check individual inputs later.
        needs_correction = [self._needs_fringe_correction(ad) for ad in adinputs]
        if any(needs_correction):
            if do_cal == 'skip':
                log.warning("Fringe correction has been turned off but is "
                            "recommended.")
                return adinputs
        else:
            if do_cal == 'procmode' or do_cal == 'skip':
                log.stdinfo("No input images require a fringe correction.")
                return adinputs
            else:  # do_cal == 'force':
                log.warning("Fringe correction has been forced on but may not "
                            "be required.")


        if fringe is None:
            # This logic is for QAP
            try:
                fringe_list = self.streams['fringe']
                assert len(fringe_list) == 1
                scale = False
                log.stdinfo("Using fringe frame in 'fringe' stream. "
                            "Setting scale=False")
                fringe_list = (fringe_list[0], "stream")
            except (KeyError, AssertionError):
                fringe_list = self.caldb.get_processed_fringe(adinputs)
        else:
            fringe_list = (fringe, None)

        # Usual stuff to ensure that we have an iterable of the correct length
        # for the scale factors regardless of what the input is
        scale_factor = params["scale_factor"]
        try:
            factors = iter(scale_factor)
        except TypeError:
            factors = iter([scale_factor] * len(adinputs))
        else:
            # In case a single-element list was passed
            if len(scale_factor) == 1:
                factors = iter(scale_factor * len(adinputs))

        # Get a fringe AD object for every science frame
        for ad, fringe, origin, correct in zip(*gt.make_lists(
                adinputs, *fringe_list, needs_correction, force_ad=(1,))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by "
                            "fringeCorrect. Continuing.")
                continue

            # Logic to deal with different exposure times where only
            # some inputs might require fringe correction
            # KL: for now, I'm not allowing the "force" to do anything when
            #     the correction is not needed.
            if (do_cal == 'procmode' or do_cal == 'force') and not correct:
                log.stdinfo("{} does not require a fringe correction".
                            format(ad.filename))
                ad.update_filename(suffix=params["suffix"], strip=True)
                continue

            # At this point, we definitely want to do a fringe correction
            # so we'd better have a fringe frame!
            if fringe is None:
                if 'sq' not in self.mode and do_cal != 'force':
                    log.warning("No changes will be made to {}, since no "
                                "fringe frame has been specified".
                                format(ad.filename))
                    continue
                else:
                    raise OSError("No processed fringe listed for "
                                  f"{ad.filename}")

            # Check the inputs have matching filters, binning, and shapes
            try:
                gt.check_inputs_match(ad, fringe)
            except ValueError:
                fringe = gt.clip_auxiliary_data(adinput=ad, aux=fringe,
                                                aux_type="cal")
                gt.check_inputs_match(ad, fringe)

            origin_str = f" (obtained from {origin})" if origin else ""
            log.stdinfo(f"{ad.filename}: using the fringe frame "
                        f"{fringe.filename}{origin_str}")
            matched_groups = (ad.group_id() == fringe.group_id())
            if scale or (scale is None and not matched_groups):
                factor = next(factors)
                if factor is None:
                    factor = self._calculate_fringe_scaling(ad, fringe)
                log.stdinfo("Scaling fringe frame by factor {:.3f} before "
                            "subtracting from {}".format(factor, ad.filename))
                # Since all elements of fringe_list might be references to the
                # same AD, need to make a copy before multiplying
                fringe_copy = deepcopy(fringe)
                fringe_copy.multiply(factor)
                ad.subtract(fringe_copy)
            else:
                if scale is None:
                    log.stdinfo("Not scaling fringe frame with same group ID "
                                "as {}".format(ad.filename))
                ad.subtract(fringe)

            # Timestamp and update header and filename
            ad.phu.set("FRINGEIM", fringe.filename, self.keyword_comments["FRINGEIM"])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            if fringe.path:
                add_provenance(ad, fringe.filename, md5sum(fringe.path) or "", self.myself())
        return adinputs
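
The scale_factor handling above normalises three input shapes into one
iterator. A standalone sketch of that logic with the same semantics assumed: a
scalar (or None) is broadcast, a single-element sequence is broadcast, and a
full-length sequence is used as given.

    def normalise_factors(scale_factor, n):
        """Return an iterator of n scale factors from scalar/sequence input."""
        try:
            factors = list(scale_factor)
        except TypeError:  # scalar or None
            return iter([scale_factor] * n)
        return iter(factors * n if len(factors) == 1 else factors)

    assert list(normalise_factors(None, 3)) == [None, None, None]
    assert list(normalise_factors([2.0], 3)) == [2.0, 2.0, 2.0]
    assert list(normalise_factors([1.0, 2.0, 3.0], 3)) == [1.0, 2.0, 3.0]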
Example #17
    def QECorrect(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        arc : {None, AstroData, str}
            Arc(s) with distortion map.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            self.getProcessedArc(adinputs, refresh=False)
            arc_list = self._get_cal(adinputs, 'processed_arc')
        else:
            arc_list = arc

        for ad, arc in zip(*gt.make_lists(adinputs, arc_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by QECorrect".
                            format(ad.filename))
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.stdinfo(f"{ad.filename} has the e2v CCDs, so no QE "
                            "correction is necessary")
                continue

            if self.timestamp_keys['mosaicDetectors'] in ad.phu:
                log.warning(f"{ad.filename} has been processed by mosaic"
                            "Detectors so QECorrect cannot be run")
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin or
                                    arc.detector_y_bin() != ybin):
                log.warning("Science frame {} and arc {} have different binnings,"
                            "so cannot use arc".format(ad.filename, arc.filename))
                arc = None

            # The plan here is to attach the mosaic gWCS to the science frame,
            # apply an origin shift to put it in the frame of the arc, and
            # then use the arc's WCS to get the wavelength. If there's no arc,
            # we just use the science frame's WCS.
            # Since we're going to change that WCS, store it for restoration.
            original_wcs = [ext.wcs for ext in ad]
            try:
                transform.add_mosaic_wcs(ad, geotable)
            except ValueError:
                log.warning(f"{ad.filename} already has a 'mosaic' coordinate"
                            "frame. This is unexpected but I'll continue.")

            if arc is None:
                if 'sq' in self.mode:
                    raise OSError(f"No processed arc listed for {ad.filename}")
                else:
                    log.warning(f"No arc supplied for {ad.filename}")
            else:
                # OK, we definitely want to try to do this, get a wavelength solution
                if self.timestamp_keys['determineWavelengthSolution'] not in arc.phu:
                    msg = f"Arc {arc.filename} (for {ad.filename} has not been wavelength calibrated."
                    if 'sq' in self.mode:
                        raise IOError(msg)
                    else:
                        log.warning(msg)

                # We'll be modifying this
                arc_wcs = deepcopy(arc[0].wcs)
                if 'distortion_corrected' not in arc_wcs.available_frames:
                    msg = f"Arc {arc.filename} (for {ad.filename}) has no distortion model."
                    if 'sq' in self.mode:
                        raise OSError(msg)
                    else:
                        log.warning(msg)

                # NB. At this point, we could have an arc that has no good
                # wavelength solution nor distortion correction. But we will
                # use its WCS rather than the science frame's because it must
                # have been supplied by the user.

                # This is GMOS so no need to be as generic as distortionCorrect
                ad_detsec = ad.detector_section()
                arc_detsec = arc.detector_section()[0]
                if (ad_detsec[0].x1, ad_detsec[-1].x2) != (arc_detsec.x1, arc_detsec.x2):
                    raise ValueError("I don't know how to process the "
                                     f"offsets between {ad.filename} "
                                     f"and {arc.filename}")

                yoff1 = arc_detsec.y1 - ad_detsec[0].y1
                yoff2 = arc_detsec.y2 - ad_detsec[0].y2
                arc_ext_shapes = [(ext.shape[0] - yoff1 + yoff2,
                                   ext.shape[1]) for ext in ad]
                arc_corners = np.concatenate([transform.get_output_corners(
                    ext.wcs.get_transform(ext.wcs.input_frame, 'mosaic'),
                    input_shape=arc_shape, origin=(yoff1, 0))
                    for ext, arc_shape in zip(ad, arc_ext_shapes)], axis=1)
                arc_origin = tuple(np.ceil(min(corners)) for corners in arc_corners)

                # So this is what was applied to the ARC to get the
                # mosaic frame to its pixel frame, in which the distortion
                # correction model was calculated. Convert coordinates
                # from python order to Model order.
                origin_shift = reduce(Model.__and__, [models.Shift(-origin)
                                                      for origin in arc_origin[::-1]])
                arc_wcs.insert_transform(arc_wcs.input_frame, origin_shift, after=True)

            array_info = gt.array_information(ad)
            if array_info.detector_shape == (1, 3):
                ccd2_indices = array_info.extensions[1]
            else:
                raise ValueError(f"{ad.filename} does not have 3 separate detectors")

            for index, ext in enumerate(ad):
                if index in ccd2_indices:
                    continue

                # Use the WCS in the extension if we don't have an arc,
                # otherwise use the arc's mosaic->world transformation
                if arc is None:
                    trans = ext.wcs.forward_transform
                else:
                    trans = (ext.wcs.get_transform(ext.wcs.input_frame, 'mosaic') |
                             arc_wcs.forward_transform)

                ygrid, xgrid = np.indices(ext.shape)
                # TODO: want with_units
                waves = trans(xgrid, ygrid)[0] * u.nm  # Wavelength always axis 0
                try:
                    qe_correction = qeModel(ext)(
                        (waves / u.nm).to(u.dimensionless_unscaled).value
                    ).astype(np.float32)
                except TypeError:  # qeModel() returns None
                    msg = "No QE correction found for {}:{}".format(
                        ad.filename, ext.hdr['EXTVER'])
                    if 'sq' in self.mode:
                        raise ValueError(msg)
                    else:
                        log.warning(msg)
                        continue  # qe_correction is undefined; skip extension
                log.stdinfo("Mean relative QE of EXTVER {} is {:.5f}".
                            format(ext.hdr['EXTVER'], qe_correction.mean()))
                if not is_flat:
                    qe_correction = 1. / qe_correction
                qe_correction[qe_correction < 0] = 0
                qe_correction[qe_correction > 10] = 0
                ext.multiply(qe_correction)

            for ext, orig_wcs in zip(ad, original_wcs):
                ext.wcs = orig_wcs

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
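
The per-extension arithmetic above reduces to: evaluate a relative QE at each
pixel's wavelength, invert it for science frames (flats are multiplied
directly), and zero out implausible values so they suppress rather than
amplify bad pixels. The same clipping in isolation, with a hypothetical qe
array:

    import numpy as np

    def apply_qe(data, qe, is_flat):
        """Multiply data by a (possibly inverted) clipped QE correction."""
        qe = np.asarray(qe, dtype=np.float32).copy()
        if not is_flat:
            with np.errstate(divide='ignore'):
                qe = np.where(qe != 0, 1.0 / qe, 0.0)
        qe[(qe < 0) | (qe > 10)] = 0  # zero implausible corrections
        return data * qe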
Example #18
    def addMDF(self, adinputs=None, suffix=None, mdf=None):
        """
        This primitive is used to add an MDF extension to the input AstroData
        object. If only one MDF is provided, that MDF will be added to all input
        AstroData object(s). If more than one MDF is provided, the number of
        MDF AstroData objects must match the number of input AstroData objects.
        If no MDF is provided, the primitive will attempt to determine an
        appropriate MDF.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        mdf: str/None
            name of MDF to add (None => use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if mdf is None:
            self.getMDF(adinputs)
            mdf_list = [self._get_cal(ad, 'mask') for ad in adinputs]
        else:
            mdf_list = mdf

        for ad, mdf in zip(*gt.make_lists(adinputs, mdf_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addMDF'.
                            format(ad.filename))
                continue
            if hasattr(ad, 'MDF'):
                log.warning('An MDF extension already exists in {}, so no '
                            'MDF will be added'.format(ad.filename))
                continue

            if 'SPECT' not in ad.tags:
                log.stdinfo('{} is not spectroscopic data, so no MDF will '
                            'be added'.format(ad.filename))
                continue

            if mdf is None:
                log.stdinfo('No MDF could be retrieved for {}'.
                            format(ad.filename))
                continue

            try:
                # This will raise some sort of exception unless the MDF file
                # has a single MDF Table extension
                ad.MDF = mdf.MDF
            except Exception:
                if len(mdf.tables) == 1:
                    ad.MDF = getattr(mdf, mdf.tables.pop())
                else:
                    log.warning('Cannot find MDF in {}, so no MDF will be '
                                'added'.format(mdf.filename))
                continue

            log.fullinfo('Attaching the MDF {} to {}'.format(mdf.filename,
                                                             ad.filename))

            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
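
The try/except block above implements a fallback lookup: use the top-level MDF
attribute if it exists, otherwise accept a lone table of any name. The same
idea as a standalone sketch (attribute and method names follow the usage
above):

    def find_mdf_table(mdf):
        """Return the MDF table, falling back to a lone table of any name."""
        try:
            return mdf.MDF
        except AttributeError:
            tables = set(mdf.tables)
            if len(tables) == 1:
                return getattr(mdf, tables.pop())
            raise LookupError("Cannot find a unique MDF table in "
                              "{}".format(mdf.filename))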
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            ad_detsec = ad.detector_section()
            no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                             for detsec in ad_detsec)
            has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec)
                          and 'Hamamatsu' in ad.detector_name(pretty=True))

            if illum:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

                for ext, illum_ext in zip(ad, final_illum):
                    if illum_ext is not None:
                        # Ensure we're only adding the unilluminated bit
                        iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                        0).astype(DQ.datatype)
                        ext.mask = iext if ext.mask is None else ext.mask | iext
            elif not no_bridges:  # i.e. there are bridges.
                # Default operation for GMOS full-frame LS
                # The 95% cut should ensure that we're sampling something
                # bright (even for an arc)
                # The max is intended to handle R150 data, where many of
                # the extensions are unilluminated

                row_medians = np.max(np.array(
                    [np.percentile(ext.data, 95, axis=1) for ext in ad]),
                    axis=0)
                rows = np.arange(len(row_medians))
                m_init = models.Polynomial1D(degree=3)
                fit_it = fitting.FittingWithOutlierRemoval(
                    fitting.LinearLSQFitter(),
                    outlier_func=sigma_clip,
                    sigma_upper=1,
                    sigma_lower=3)
                m_final, _ = fit_it(m_init, rows, row_medians)
                model_fit = m_final(rows)
                # Find points which are significantly below the smooth illumination fit
                # First ensure we don't worry about single rows
                row_mask = at.boxcar(model_fit - row_medians > 0.1 * model_fit,
                                     operation=np.logical_and,
                                     size=1)
                row_mask = at.boxcar(row_mask, operation=np.logical_or, size=3)
                for ext in ad:
                    ext.mask |= (row_mask * DQ.unilluminated).astype(
                        DQ.datatype)[:, np.newaxis]

                if has_48rows:
                    actual_rows = 48 // ad.detector_y_bin()
                    for ext in ad:
                        ext.mask[:actual_rows] |= DQ.unilluminated

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
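
The bridge-detection branch above boils down to: measure a bright percentile
per row, fit a smooth polynomial to it, and flag rows that fall well below the
fit. A minimal numpy/astropy sketch of that idea (the threshold matches the
0.1 factor used above; the boxcar growing/shrinking and sigma clipping are
omitted):

    import numpy as np
    from astropy.modeling import models, fitting

    def unilluminated_rows(image, threshold=0.1):
        """Return a boolean mask of rows significantly darker than the fit."""
        row_medians = np.percentile(image, 95, axis=1)
        rows = np.arange(len(row_medians))
        fit = fitting.LinearLSQFitter()(models.Polynomial1D(degree=3),
                                        rows, row_medians)
        return fit(rows) - row_medians > threshold * fit(rows)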
Example #20
    def addDQ(self, adinputs=None, **params):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        static_bpm: str
            Name of bad pixel mask ("default" -> use default from look-up table)
            If set to None, no static_bpm will be added.
        user_bpm: str
            Name of the bad pixel mask created by the user from flats and
            darks.  It is an optional BPM that can be added to the static one.
        add_illum_mask: bool
            add an illumination mask?
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["addDQ"]
        sfx = params["suffix"]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        static_bpm_list = params['static_bpm']
        user_bpm_list = params['user_bpm']

        if static_bpm_list == "default":
            static_bpm_list = [self._get_bpm_filename(ad) for ad in adinputs]

        for ad, static, user in zip(*gt.make_lists(adinputs, static_bpm_list,
                                                   user_bpm_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addDQ'.format(ad.filename))
                continue

            if static is None:
                # So it can be zipped with the AD
                final_static = [None] * len(ad)
            else:
                log.fullinfo("Using {} as static BPM".format(static.filename))
                final_static = gt.clip_auxiliary_data(ad, aux=static,
                                        aux_type='bpm', return_dtype=DQ.datatype)

            if user is None:
                final_user = [None] * len(ad)
            else:
                log.fullinfo("Using {} as user BPM".format(user.filename))
                final_user = gt.clip_auxiliary_data(ad, aux=user,
                                        aux_type='bpm', return_dtype=DQ.datatype)

            for ext, static_ext, user_ext in zip(ad, final_static, final_user):
                extver = ext.hdr['EXTVER']
                if ext.mask is not None:
                    log.warning('A mask already exists in extver {}'.
                                format(extver))
                    continue

                non_linear_level = ext.non_linear_level()
                saturation_level = ext.saturation_level()

                # Need to create the array first for 3D raw F2 data, with 2D BPM
                ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)
                if static_ext is not None:
                    ext.mask |= static_ext.data
                if user_ext is not None:
                    ext.mask |= user_ext.data

                if saturation_level:
                    log.fullinfo('Flagging saturated pixels in {}:{} '
                                 'above level {:.2f}'.
                                 format(ad.filename, extver, saturation_level))
                    ext.mask |= np.where(ext.data >= saturation_level,
                                         DQ.saturated, 0).astype(DQ.datatype)

                if non_linear_level:
                    if saturation_level:
                        if saturation_level > non_linear_level:
                            log.fullinfo('Flagging non-linear pixels in {}:{} '
                                         'above level {:.2f}'.
                                         format(ad.filename, extver,
                                                non_linear_level))
                            ext.mask |= np.where((ext.data >= non_linear_level) &
                                                 (ext.data < saturation_level),
                                                 DQ.non_linear, 0).astype(DQ.datatype)
                            # Readout modes of IR detectors can result in
                            # saturated pixels having values below the
                            # saturation level. Flag those. Assume we have an
                            # IR detector here because both non-linear and
                            # saturation levels are defined and nonlin<sat
                            regions, nregions = measurements.label(
                                                ext.data < non_linear_level)
                            # In all my tests, region 1 has been the majority
                            # of the image; however, I cannot guarantee that
                            # this is always the case and therefore we should
                            # check the size of each region
                            region_sizes = measurements.labeled_comprehension(
                                ext.data, regions, np.arange(1, nregions+1),
                                len, int, 0)
                            # First, assume all regions are saturated, and
                            # remove any very large ones. This is much faster
                            # than progressively adding each region to DQ
                            hidden_saturation_array = np.where(regions > 0,
                                                    4, 0).astype(DQ.datatype)
                            for region in range(1, nregions+1):
                                # Limit of 10000 pixels for a hole is a bit arbitrary
                                if region_sizes[region-1] > 10000:
                                    hidden_saturation_array[regions==region] = 0
                            ext.mask |= hidden_saturation_array

                        elif saturation_level < non_linear_level:
                            log.warning('{}:{} has saturation level less than '
                                'non-linear level'.format(ad.filename, extver))
                        else:
                            log.fullinfo('Saturation and non-linear levels '
                                         'are the same for {}:{}. Only '
                                         'flagging saturated pixels'.
                                format(ad.filename, extver))
                    else:
                        log.fullinfo('Flagging non-linear pixels in {}:{} '
                                     'above level {:.2f}'.
                                     format(ad.filename, extver,
                                            non_linear_level))
                        ext.mask |= np.where(ext.data >= non_linear_level,
                                             DQ.non_linear, 0).astype(DQ.datatype)


        # Handle latency if requested
        if params.get("latency", False):
            try:
                adinputs = self.addLatencyToDQ(adinputs, time=params["time"],
                                               non_linear=params["non_linear"])
            except AttributeError:
                log.warning("addLatencyToDQ() not defined in primitivesClass "
                            + self.__class__.__name__)

        # Add the illumination mask if requested
        if params['add_illum_mask']:
            adinputs = self.addIllumMaskToDQ(adinputs, illum_mask=params["illum_mask"])

        # Timestamp and update filenames
        for ad in adinputs:
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
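
The DQ plane described in the docstring is a bit mask, so the individual flags
combine by bitwise OR. A small illustration with the documented bit values
(1 = bad pixel, 2 = non-linear, 4 = saturated) and made-up thresholds:

    import numpy as np

    NON_LINEAR, SATURATED = 2, 4
    data = np.array([100., 40000., 70000.])
    mask = np.zeros(data.shape, dtype=np.uint16)
    mask |= np.where(data >= 65000, SATURATED, 0).astype(mask.dtype)
    mask |= np.where((data >= 30000) & (data < 65000),
                     NON_LINEAR, 0).astype(mask.dtype)
    # mask -> [0, 2, 4]; a pixel carrying both flags would read 6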
Example #21
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extensions will be updated, if
        they exist. If no bias is provided, getProcessedBias will be called
        to ensure a bias exists for every adinput.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_bias: bool
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if not do_bias:
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            self.getProcessedBias(adinputs, refresh=False)
            bias_list = self._get_cal(adinputs, 'processed_bias')
        else:
            bias_list = bias

        # Provide a bias AD object for every science frame
        for ad, bias in zip(
                *gt.make_lists(adinputs, bias_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by biasCorrect".format(
                                ad.filename))
                continue

            if bias is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    raise OSError('No processed bias listed for {}'.format(
                        ad.filename))

            try:
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)

            log.fullinfo('Subtracting this bias from {}:\n{}'.format(
                ad.filename, bias.filename))
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename,
                       self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
            if bias.path:
                add_provenance(ad, bias.filename,
                               md5sum(bias.path) or "", self.myself())

        return adinputs
    def applyQECorrection(self, adinputs=None, **params):
        """
        This primitive applies a wavelength-dependent QE correction to
        a 2D spectral image, based on the wavelength solution of an
        associated processed_arc.

        It is only designed to work on FLATs, and therefore unmosaicked data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        arc = params["arc"]

        # Get a suitable arc frame (with distortion map) for every science AD
        if arc is None:
            self.getProcessedArc(adinputs, refresh=False)
            arc_list = self._get_cal(adinputs, 'processed_arc')
        else:
            arc_list = arc

        for ad, arc in zip(*gt.make_lists(adinputs, arc_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    "No changes will be made to {}, since it has "
                    "already been processed by applyQECorrection".format(
                        ad.filename))
                continue

            if 'e2v' in ad.detector_name(pretty=True):
                log.warning("{} has the e2v CCDs, so no QE correction "
                            "is necessary".format(ad.filename))
                continue

            # Determines whether to multiply or divide by QE correction
            is_flat = 'FLAT' in ad.tags

            # If the arc's binning doesn't match, we may still be able to
            # fall back to the approximate solution
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            if arc is not None and (arc.detector_x_bin() != xbin
                                    or arc.detector_y_bin() != ybin):
                log.warning(
                    "Science frame {} and arc {} have different binnings, "
                    "so cannot use arc".format(ad.filename, arc.filename))
                arc = None

            # Reset the distortion model for each AD so a previous arc's
            # inverse cannot leak into this iteration
            distort_model = models.Identity(2)

            # OK, we definitely want to try to do this, get a wavelength solution
            try:
                wavecal = arc[0].WAVECAL
            except (TypeError, AttributeError):
                wave_model = None
            else:
                model_dict = dict(zip(wavecal['name'],
                                      wavecal['coefficients']))
                wave_model = astromodels.dict_to_chebyshev(model_dict)
                if not isinstance(wave_model, models.Chebyshev1D):
                    log.warning("Problem reading wavelength solution from arc "
                                "{}".format(arc.filename))

            if wave_model is None:
                if 'sq' in self.mode:
                    raise OSError("No wavelength solution for {}".format(
                        ad.filename))
                else:
                    log.warning("Using approximate wavelength solution for "
                                "{}".format(ad.filename))

            try:
                fitcoord = arc[0].FITCOORD
            except (TypeError, AttributeError):
                # distort_model already has Identity inverse so nothing required
                pass
            else:
                # TODO: This is copied from determineDistortion() and will need
                # to be refactored out. Or we might be able to simply replace it
                # with a gWCS.pixel_to_world() call
                model_dict = dict(
                    zip(fitcoord['inv_name'], fitcoord['inv_coefficients']))
                m_inverse = astromodels.dict_to_chebyshev(model_dict)
                if not isinstance(m_inverse, models.Chebyshev2D):
                    log.warning("Problem reading distortion model from arc "
                                "{}".format(arc.filename))
                else:
                    distort_model.inverse = models.Mapping(
                        (0, 1, 1)) | (m_inverse & models.Identity(1))

            if distort_model.inverse == distort_model:  # Identity(2)
                if 'sq' in self.mode:
                    raise OSError("No distortion model for {}".format(
                        ad.filename))
                else:
                    log.warning(
                        "Proceeding without a disortion correction for "
                        "{}".format(ad.filename))

            ad_detsec = ad.detector_section()
            adg = transform.create_mosaic_transform(ad, geotable)
            if arc is not None:
                arc_detsec = arc.detector_section()[0]
                shifts = [
                    c1 - c2 for c1, c2 in zip(
                        np.array(ad_detsec).min(axis=0), arc_detsec)
                ]
                xshift, yshift = shifts[0] / xbin, shifts[2] / ybin  # x1, y1
                if xshift or yshift:
                    log.stdinfo("Found a shift of ({},{}) pixels between "
                                "{} and the calibration.".format(
                                    xshift, yshift, ad.filename))
                add_shapes, add_transforms = [], []
                for (arr, trans) in adg:
                    # Try to work out shape of this Block in the unmosaicked
                    # arc, and then apply a shift to align it with the
                    # science Block before applying the same transform.
                    if xshift == 0:
                        add_shapes.append(
                            ((arc_detsec.y2 - arc_detsec.y1) // ybin,
                             arr.shape[1]))
                    else:
                        add_shapes.append(
                            (arr.shape[0],
                             (arc_detsec.x2 - arc_detsec.x1) // xbin))
                    t = transform.Transform(
                        models.Shift(-xshift) & models.Shift(-yshift))
                    t.append(trans)
                    add_transforms.append(t)
                adg.calculate_output_shape(
                    additional_array_shapes=add_shapes,
                    additional_transforms=add_transforms)
                origin_shift = models.Shift(-adg.origin[1]) & models.Shift(
                    -adg.origin[0])
                for t in adg.transforms:
                    t.append(origin_shift)

            # Irrespective of arc or not, apply the distortion model (it may
            # be Identity), recalculate output_shape and reset the origin
            for t in adg.transforms:
                t.append(distort_model.copy())
            adg.calculate_output_shape()
            adg.reset_origin()

            # Now we know the shape of the output, we can construct the
            # approximate wavelength solution; ad.dispersion() returns a list!
            if wave_model is None:
                wave_model = (
                    models.Shift(-0.5 * adg.output_shape[1])
                    | models.Scale(ad.dispersion(asNanometers=True)[0])
                    | models.Shift(ad.central_wavelength(asNanometers=True)))

            for ccd, (block, trans) in enumerate(adg, start=1):
                if ccd == 2:
                    continue
                for ext, corner in zip(block, block.corners):
                    ygrid, xgrid = np.indices(ext.shape)
                    xgrid += corner[1]  # No need for ygrid
                    xnew = trans(xgrid, ygrid)[0]
                    # Some unit-based stuff here to prepare for gWCS
                    waves = wave_model(xnew) * u.nm
                    try:
                        qe_correction = qeModel(ext)(
                            (waves / u.nm).to(u.dimensionless_unscaled).value)
                    except TypeError:  # qeModel() returns None
                        msg = "No QE correction found for {}:{}".format(
                            ad.filename, ext.hdr['EXTVER'])
                        if 'sq' in self.mode:
                            raise ValueError(msg)
                        else:
                            log.warning(msg)
                            continue  # qe_correction is undefined; skip extension
                    log.fullinfo(
                        "Mean relative QE of EXTVER {} is {:.5f}".format(
                            ext.hdr['EXTVER'], qe_correction.mean()))
                    if not is_flat:
                        qe_correction = 1. / qe_correction
                    qe_correction[qe_correction < 0] = 0
                    qe_correction[qe_correction > 10] = 0
                    ext.multiply(qe_correction)

            # Timestamp and update the filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
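
The approximate wavelength solution constructed above is a simple compound
model: shift the pixel coordinate to the array centre, scale by the
dispersion, then offset by the central wavelength. Unrolled with purely
illustrative numbers:

    from astropy.modeling import models

    npix, dispersion_nm, cenwave_nm = 3000, 0.05, 550.0
    wave_model = (models.Shift(-0.5 * npix)
                  | models.Scale(dispersion_nm)
                  | models.Shift(cenwave_nm))
    assert abs(wave_model(0.5 * npix) - cenwave_nm) < 1e-9  # centre -> 550 nm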